query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
`check_for_fit` wraps a method and validates that `self._is_fitted` is `True`. If it is `False`, it raises an exception; if `True`, it calls the wrapped method and returns its result (a usage sketch follows this row). | def check_for_fit(cls, method):
    @wraps(method)
    def _check_for_fit(self, X=None, y=None):
        klass = type(self).__name__
        if not self._is_fitted and self.needs_fitting:
            raise ComponentNotYetFittedError(
                f"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}."
            )
        else:
            return method(self, X, y)
    return _check_for_fit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_fit(cls, method):\n\n @wraps(method)\n def _check_for_fit(self, *args, **kwargs):\n klass = type(self).__name__\n if not self._is_fitted:\n raise PipelineNotYetFittedError(\n f\"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}.\"\n )\n\n return method(self, *args, **kwargs)\n\n return _check_for_fit",
"def check_if_it_can_fit(object):\n if hasattr(object, \"fit\") and hasattr(object, \"predict\") and hasattr(object, \"get_params\") and hasattr(object,\n \"set_params\"):\n return object\n else:\n raise Exception(\"Pass an estimator that has methods fit predict set_params get_params\")",
"def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])",
"def checkIsValid(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n if self.validator.isValid:\n return f(self, *args, **kwargs)\n else:\n error = self.validator._exceptionClass('Called: {} method before data validated'.format(f.__name__))\n self.validator._errors[f.__name__] = error\n if self.validator._errorHandler is not None:\n self.validator._errorHandler(error, self.getValidationContext())\n return\n\n return wrapper",
"def is_fitted(self):\n\n return self.isFitted",
"def test_check_is_fitted_call(self):\n\n x = BaseTransformer(columns=None)\n\n with mock.patch(\"tubular.base.check_is_fitted\") as mocked_method:\n\n attributes = \"columns\"\n\n x.check_is_fitted(attributes)\n\n assert (\n mocked_method.call_count == 1\n ), f\"Incorrect number of calls to tubular.base.check_is_fitted -\\n Expected: 1\\n Actual: {mocked_method.call_count}\"\n\n call_1_args = mocked_method.call_args_list[0]\n call_1_pos_args = call_1_args[0]\n call_1_kwargs = call_1_args[1]\n\n h.assert_dict_equal_msg(\n actual=call_1_kwargs,\n expected={},\n msg_tag=\"Keyword arg assert for tubular.base.check_is_fitted\",\n )\n\n assert (\n len(call_1_pos_args) == 2\n ), f\"Incorrect number of positional arguments in check_is_fitted call -\\n Expected: 2\\n Actual: {len(call_1_pos_args)}\"\n\n assert (\n call_1_pos_args[0] is x\n ), f\"Incorrect first positional arg in check_is_fitted call -\\n Expected: {x}\\n Actual: {call_1_pos_args[0]}\"\n\n assert (\n call_1_pos_args[1] == attributes\n ), f\"Incorrect second positional arg in check_is_fitted call -\\n Expected: {attributes}\\n Actual: {call_1_pos_args[1]}\"",
"def _check_if_fitted(self):\n if not self.fitted:\n raise AssertionError('Model is not fitted! Fit the model to a '\n 'dataset before attempting to plot results.')",
"def _check_is_fitted(self):\n # Do not check `b` as some classifiers do not set it\n check_is_fitted(self, 'w')\n super(CClassifierLinear, self)._check_is_fitted()",
"def _check_if_estimator(estimator):\n msg = (\"This %(name)s instance has no attribute \\\"fit\\\".\")\n if not hasattr(estimator, \"fit\"):\n raise AttributeError(msg % {'name': type(estimator).__name__})",
"def requires_training_wheels(method):\n\n def wrapper(model, *args, **kwargs):\n if not model.is_trained:\n raise TypeError(\"the model needs training first\")\n return method(model, *args, **kwargs)\n return wrapper",
"def is_fitted(self):\n return self.__fdata is not None",
"def __fit(self, function_handle):\n try:\n function_handle()\n return True\n except KeyError as e:\n if 'Floating-point under-/overflow occurred at epoch' in \\\n e.args[0] or \\\n 'removed all features' in e.args[0] or \\\n 'failed to create intent' in e.args[0]:\n pass\n else:\n traceback.print_exc()\n raise e\n except ValueError as e:\n if 'Floating-point under-/overflow occurred at epoch' in e.args[0]:\n pass\n elif 'removed all features' in e.args[0]:\n pass\n elif 'failed to create intent' in e.args[0]:\n pass\n else:\n raise e\n except LinAlgError as e:\n if 'not positive definite, even with jitter' in e.args[0]:\n pass\n else:\n raise e\n except RuntimeWarning as e:\n if 'invalid value encountered in sqrt' in e.args[0]:\n pass\n elif 'divide by zero encountered in divide' in e.args[0]:\n pass\n else:\n raise e\n except UserWarning as e:\n if 'FastICA did not converge' in e.args[0]:\n pass\n else:\n raise e",
"def fit(self, **kwargs):\n if self.fit_method is not None:\n fit_kwargs = self._fit_params.copy()\n fit_kwargs.update(kwargs)\n fit_kwargs = self._get_method_kwargs(fit_kwargs, self.fit_args)\n getattr(self.instance, self.fit_method)(**fit_kwargs)",
"def _check_if_fitted(self):\n if self.covar_module is None:\n raise RuntimeError(\n \"Model has not been fitted. You need to call \"\n \"`fit_fully_bayesian_model_nuts` to fit the model.\"\n )",
"def _verify_fit(self) -> None:\n if not hasattr(self, 'X_train') or not hasattr(self, 'Y_train'):\n raise ValueError('Training data not set. Call `fit` and pass training data first.')",
"def test_super_fit_call(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n expected_call_args = {0: {\"args\": (d.create_df_2(), None), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker, tubular.base.BaseTransformer, \"fit\", expected_call_args\n ):\n\n x.fit(df)",
"def has_fit(client: NumPyClient) -> bool:\n return type(client).fit != NumPyClient.fit",
"def test_fit_returns_self(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n x_fitted = x.fit(df)\n\n assert x_fitted is x, \"Returned value from BaseTransformer.fit not as expected.\"",
"def test_apply_validation(self, has_validation):\n called_with = None\n def validator(*args, **kwargs):\n nonlocal called_with\n called_with = CallArguments(*args, **kwargs)\n\n ctx, name, value = object(), 'myparam', object()\n\n fparam = FParameter(\n POSITIONAL_ONLY,\n name=name,\n validator=validator if has_validation else None,\n )\n fparam.apply_validation(ctx, value)\n if has_validation:\n assert called_with.args == (ctx, name, value)\n else:\n assert called_with is None",
"def test_with_fitted(self):\n X, y = make_blobs(\n n_samples=100, n_features=5, centers=3, shuffle=False, random_state=112\n )\n model = MiniBatchKMeans().fit(X)\n labels = model.predict(X)\n\n with mock.patch.object(model, \"fit\") as mockfit:\n oz = SilhouetteVisualizer(model)\n oz.fit(X)\n mockfit.assert_not_called()\n\n with mock.patch.object(model, \"fit\") as mockfit:\n oz = SilhouetteVisualizer(model, is_fitted=True)\n oz.fit(X)\n mockfit.assert_not_called()\n\n with mock.patch.object(model, \"fit_predict\", return_value=labels) as mockfit:\n oz = SilhouetteVisualizer(model, is_fitted=False)\n oz.fit(X)\n mockfit.assert_called_once_with(X, None)",
"def __call__(self): # run test\n\n try: # Check if any errors were raised during calling of self.func\n return abs(self.func(*self.args, **self.kwargs) - self.res) < self._tolerance\n\n except IndexError:\n return False",
"def _check_fitted(self):\n assert self.subspace_basis is not None, \\\n 'You must fit %s before you can project' % self.__class__.__name__",
"def _check_fitted(self):\n assert self.subspace_basis is not None, \\\n 'You must fit %s before you can project' % self.__class__.__name__",
"def fit(self):\n raise NotImplementedError('')",
"def requires_training_wheels(f):\n def wrapper(model, *args, **kwargs):\n if not model._trained:\n raise TypeError(\"the model needs training first\")\n return f(model, *args, **kwargs)\n return wrapper",
"def calculate_goodness_of_fit(estimate, reference, fit_metric='ad', num_samples=100, _random_state=None):\n\n try:\n _check_ensembles_contain_correct_number_of_distributions(estimate, reference)\n except ValueError: #pragma: no cover - unittest coverage for _check_ensembles_contain_correct_number_of_distributions is complete\n logging.warning(\"The ensemble `reference` should have 1 or N distributions. With N = number of distributions in the ensemble `estimate`.\")\n\n try:\n _check_ensemble_is_not_nested(estimate)\n except ValueError: #pragma: no cover - unittest coverage for _check_ensemble_is_not_nested is complete\n logging.warning(\"Each element in the ensemble `estimate` must be a single distribution.\")\n\n try:\n _check_ensemble_is_not_nested(reference)\n except ValueError: #pragma: no cover - unittest coverage for _check_ensemble_is_not_nested is complete\n logging.warning(\"Each element in the ensemble `reference` must be a single distribution.\")\n\n if fit_metric not in goodness_of_fit_metrics:\n metrics = list(goodness_of_fit_metrics.keys())\n raise KeyError(f\"`fit_metric` should be one of {metrics}.\")\n\n return goodness_of_fit_metrics[fit_metric](\n reference,\n np.squeeze(estimate.rvs(size=num_samples, random_state=_random_state))\n )",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def update(self, fit, curvature):\n if fit is None:\n self.detected = False\n else:\n if self.best_fit is None:\n self._accept_fit(fit, curvature)\n else:\n # We have a previous best fit. Compare it with the incoming one\n if self._sanity_check(fit):\n self._accept_fit(fit, curvature)\n else:\n self.detected = False",
"def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:\n\t\treturn super().fit()",
"def fit(self):\n raise NotImplementedError"
]
| [
"0.768455",
"0.6352288",
"0.62810916",
"0.60455644",
"0.60128045",
"0.5980315",
"0.59594214",
"0.5826365",
"0.5740475",
"0.56470776",
"0.5631925",
"0.56311876",
"0.5618889",
"0.5577201",
"0.55751395",
"0.5490103",
"0.54883724",
"0.53121126",
"0.52988595",
"0.5286061",
"0.52708346",
"0.5252256",
"0.5252256",
"0.52406013",
"0.52315724",
"0.5218983",
"0.5218795",
"0.5215343",
"0.51942784",
"0.5184563"
]
| 0.7850691 | 0 |
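The pair above shows the decorator in isolation. A minimal standalone sketch of how such a decorator is typically applied (the `Component` class and the module-level `check_for_fit` variant below are hypothetical, `ComponentNotYetFittedError` is defined locally, and the `needs_fitting` check is dropped for brevity):

```python
from functools import wraps


class ComponentNotYetFittedError(Exception):
    """Raised when a method that requires a fitted component is called too early."""


def check_for_fit(method):
    # Refuse to run `method` until the instance reports it has been fitted.
    @wraps(method)
    def _check_for_fit(self, X=None, y=None):
        klass = type(self).__name__
        if not self._is_fitted:
            raise ComponentNotYetFittedError(
                f"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}."
            )
        return method(self, X, y)
    return _check_for_fit


class Component:
    """Hypothetical component used only to exercise the decorator."""

    def __init__(self):
        self._is_fitted = False

    def fit(self, X, y=None):
        self._is_fitted = True
        return self

    @check_for_fit
    def predict(self, X, y=None):
        return [0] * len(X)


c = Component()
# c.predict([1, 2, 3])  # would raise ComponentNotYetFittedError
print(c.fit([1, 2, 3]).predict([1, 2, 3]))  # [0, 0, 0]
```

Wrapping with `functools.wraps` keeps the decorated method's `__name__` intact, which is what makes the error message readable.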
Obtains the max state-level fuel price (the year-fallback lookup is sketched with toy data after this row) | def get_max_fp(state_abbr, fuel_type="NG", year=False):
    if not year:
        year = UpdateParams.today.year
    if fuel_type.upper() == "NG":
        series_ID = "NG.N3035" + state_abbr + "3.A"
    elif fuel_type.upper() == "COAL":
        series_ID = "COAL.COST." + state_abbr + "-10.A"
    elif fuel_type.upper() == "PETRO":
        series_ID = "PET.EMA_EPPR_PWA_S" + state_abbr + "_DPG.A"
    else:
        raise AssertionError("Please input a valid fuel_type")
    # Check if state-level available, if not return USA price
    try:
        fuel_series = UpdateParams.api.data_by_series(series=series_ID)
        dict_key = list(fuel_series.keys())[0]
        # if fuel price in state is empty return national price
        if all(v is None for v in list(fuel_series[dict_key].values())):
            return 0.0
    except KeyError:
        return 0.0
    j = 0
    # Step back one year at a time until a non-missing value is found
    while True:
        try:
            return fuel_series[dict_key][str(year-j) + " "] / 1.0
        except:
            j += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def state_max(self) -> float:\n raise NotImplementedError",
"def max_price(self):\n return self._max_price",
"def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here",
"def __find_max_price(self):\n prices_map = map(\n lambda iceberg: utils.get_actual_penguin_amount(\n self.__game, iceberg),\n self.__game.get_all_icebergs()\n )\n return max(prices_map)",
"def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]",
"def get_fuel_price(state_abbr, fuel_type=\"NG\", year=False):\n\n if(not year):\n\n year = UpdateParams.today.year\n\n if fuel_type.upper() == \"NG\":\n\n series_ID = \"NG.N3035\" + state_abbr + \"3.A\"\n \n series_USA = \"NG.RNGWHHD.A\"\n \n series_LA = UpdateParams.api.data_by_series(series=\"NG.N3035\" + \"LA\" + \"3.A\")\n \n dict_key_LA = list(series_LA.keys())[0]\n\n elif fuel_type.upper() == \"COAL\":\n\n series_ID = \"COAL.COST.\" + state_abbr + \"-10.A\"\n\n series_USA = \"COAL.COST.US-10.A\"\n\n elif fuel_type.upper() == \"PETRO\":\n # state level wholesale/resale price data ends 2011\n series_ID = \"PET.EMA_EPPR_PWA_S\" + state_abbr + \"_DPG.A\"\n\n series_USA = \"PET.EMA_EPPR_PWG_NUS_DPG.A\"\n\n else:\n raise AssertionError(\"Please input a valid fuel_type\")\n\n fuel_series_USA = UpdateParams.api.data_by_series(series=series_USA)\n \n dict_key_USA = list(fuel_series_USA.keys())[0]\n \n # find latest USA value\n i = 0\n\n while True:\n \n try:\n fp_USA = fuel_series_USA[dict_key_USA][str(year-i) + \" \"] / 1.0\n\n break\n\n except:\n \n i += 1\n\n # Check if state-level available, if not return USA price\n try:\n fuel_series = UpdateParams.api.data_by_series(series=series_ID)\n\n dict_key = list(fuel_series.keys())[0]\n\n # if fuel price in state is empty return national price\n if all(v is None for v in list(fuel_series[dict_key].values())):\n \n return (fp_USA, year-i)\n \n except KeyError:\n \n return (fp_USA, year-i)\n\n j = 0\n\n # find latest year for state\n while True:\n\n try:\n fp_state = fuel_series[dict_key][str(year-j) + \" \"] / 1.0\n\n break\n\n except:\n\n j += 1\n \n if fuel_type.upper() == \"NG\":\n # series_LA is just the actual series not a series ID\n fp_mult = fp_state / series_LA[dict_key_LA][str(year-j) + \" \"]\n return (fp_mult * fp_USA/1.037, year-j)\n \n # return USA value if 2 years more recent vs state\n if ((year-i) - (year-j) >= 2) | (fp_state >= fp_USA):\n \n return (fp_USA/1.037, year-i)\n\n return (fp_state, year-j)",
"def getValue(self, state):\n \"\"\"Description:\n first get legal actions of current state and find the max q-value among all legalaction. \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n legalActions = self.getLegalActions(state)\n if len(legalActions) == 0:\n return 0.0\n maxValues = max([ self.getQValue(state, a) for a in legalActions])\n return maxValues\n \n \"\"\" END CODE \"\"\"",
"def fuel_calc(mass):\n return max((mass / 3) - 2, 0)",
"def get_value(self, state):\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return 0.0\n if len(possible_actions) == 0:\n return 0.0\n\n #\n # INSERT CODE HERE to get maximum possible value for a given state\n #\n\n max_value = self.get_qvalue(state, possible_actions[0])\n for action in possible_actions[1:]:\n qvalue = self.get_qvalue(state, action)\n if qvalue > max_value:\n max_value = qvalue\n\n return max_value",
"def stock_max(stock):\n max_price=0\n for i in stock['Close']:\n if i > max_price:\n max_price=i\n return max_price",
"def _get_max_expense(self):\n pass",
"def get_max(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n maximum = df.max(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n maximum = round(float(maximum), 4)\r\n return maximum",
"def max_level(self):\n return self.__max",
"def get_max(self):\n return self.serie.max()",
"def _get_maximum(self):\n return self._maximum",
"def _maximum(self) -> float:\n return self._config[CONF_MAX]",
"def cost(self):\r\n try:\r\n return dict(RPEvent.LARGESSE_VALUES)[\r\n int(self.data.get(\"celebration_tier\", 0))\r\n ][0]\r\n except (KeyError, TypeError, ValueError):\r\n self.add_error(\"celebration_tier\", \"Invalid largesse value.\")",
"def max_power(self):\r\n est_max_power = self.model * self.max_pwm / 100\r\n return est_max_power",
"def max_value(gameState):\n if terminal_test(gameState): return -1",
"def lambda_max(self):\n return const.b_wien / self.temperature",
"def f_max(cls):\n return cls.params[\"f_max\"]",
"def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)",
"def getMaximumPreviousPrice(self, departureDate, state, datas):\n specificDatas = []\n specificDatas = [data for data in datas if data[\"Date\"]==departureDate]\n\n maximumPreviousPrice = util.getPrice(specificDatas[0][\"MinimumPrice\"])\n for data in specificDatas:\n if util.getPrice(data[\"MinimumPrice\"]) > maximumPreviousPrice and data[\"State\"]>=state:\n maximumPreviousPrice = util.getPrice(data[\"MinimumPrice\"])\n\n return maximumPreviousPrice",
"def max_value (self, new_state):\n \n ##create a list to save reward information\n return_list = []\n \n ##get each values from Q based on the new_state and its possible actions\n for s, a in self.Q.keys():\n if s == new_state:\n return_list.append(self.Q[s,a])\n \n ##return the maximum value based on new_state\n return max(return_list)",
"def max_score(self):\r\n return self.lcp.get_max_score()",
"def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v",
"def max_profit(prices: List[int]) -> int:",
"def maxProfit(self, prices):\r\n\t\tprofit = 0",
"def eth_max_button_use_gas(gas_price, _coin=None):\n return pywalib.PyWalib.get_max_use_gas(gas_price)",
"def get_max_close(symbol):\n df = pd.read_csv(\"data/{}.csv\".format(symbol)) # read in data\n return df['Close'].max() # compute and return max"
]
| [
"0.7232872",
"0.71579266",
"0.695551",
"0.68795824",
"0.6802058",
"0.66905564",
"0.64502335",
"0.6434176",
"0.6378356",
"0.63419753",
"0.63411105",
"0.6247193",
"0.62242603",
"0.622073",
"0.62158287",
"0.6194649",
"0.6172671",
"0.61604536",
"0.6157182",
"0.61291057",
"0.6105317",
"0.60979646",
"0.6089683",
"0.608439",
"0.607045",
"0.606745",
"0.6059659",
"0.60456574",
"0.6040041",
"0.6037844"
]
| 0.7498042 | 0 |
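The tail of `get_max_fp` above walks backwards from the requested year until the series has a value; note that the EIA wrapper's keys carry a trailing space (e.g. `"2020 "`). A self-contained sketch of just that lookup, using made-up data and a hypothetical helper name `latest_price`:

```python
def latest_price(series, year):
    """Walk back from `year` until a non-missing value is found.

    Mirrors the while-loop at the end of get_max_fp; like the original,
    it never terminates if the series holds no usable value at all.
    """
    j = 0
    while True:
        try:
            return series[str(year - j) + " "] / 1.0
        except (KeyError, TypeError):
            j += 1


toy_series = {"2021 ": None, "2020 ": 3.47, "2019 ": 3.12}
print(latest_price(toy_series, 2021))  # 3.47 (2021 is missing, so it falls back to 2020)
```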
Grabs the fuel esc (escalation) value from EERC | def get_esc(state_abbr, fuel_type="NG"):
    temp_dict = {"NG": "Natural Gas", "COAL": "Coal", "PETRO": "Residual"}
    return UpdateParams.eerc_esc.loc[state_abbr, temp_dict[fuel_type]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_energy(self, zpe_scale_factor=1.):\n e_elect = None\n with open(self.path, 'r') as f:\n for line in f:\n if 'FINAL SINGLE POINT ENERGY' in line: # for all methods in Orca\n e_elect = float(line.split()[-1])\n if e_elect is None:\n raise LogError('Unable to find energy in Orca output file.')\n return e_elect * constants.E_h * constants.Na",
"def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f",
"def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)",
"def parse_e_elect(path: str,\n zpe_scale_factor: float = 1.,\n software: Optional[str] = None,\n ) -> Optional[float]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n if path.endswith('.yml'):\n content = read_yaml_file(path)\n if isinstance(content, dict) and 'sp' in content.keys():\n return content['sp']\n e_elect = None\n software = software or identify_ess(path)\n if software is not None and software.lower() == 'xtb':\n with open(path, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if 'TOTAL ENERGY' in line:\n e_elect = hartree_to_si(float(line.split()[3]))\n return e_elect\n log = ess_factory(fullpath=path, check_for_errors=False)\n try:\n e_elect = log.load_energy(zpe_scale_factor) * 0.001 # convert to kJ/mol\n except (LogError, NotImplementedError):\n logger.warning(f'Could not read e_elect from {path}')\n return e_elect",
"def getEichFromEQ(self, ep, verbose=False):\n #assuming plasma is centered in machine here\n zMin = ep.g['ZmAxis'] - 0.25\n zMax = ep.g['ZmAxis'] + 0.25\n zWall = np.linspace(zMin, zMax, 1000)\n zLCFS = ep.g['lcfs'][:,1]\n #this prevents us from getting locations not at midplane\n idx = np.where(np.logical_and(zLCFS>zMin,zLCFS<zMax))\n Rmax = ep.g['lcfs'][:,0][idx].max()\n Rmin = ep.g['lcfs'][:,0][idx].min()\n # geometric quantities\n Rgeo = (Rmax + Rmin) / 2.0\n a = (Rmax - Rmin) / 2.0\n aspect = a/Rgeo\n\n #Regression 15\n C = 1.35\n Cp = -0.02\n Cr = 0.04\n Cb = -0.92\n Ca = 0.42\n # Evaluate Bp at outboard midplane\n Z_omp_sol = 0.0\n Bp = abs(ep.BpFunc.ev(Rmax,Z_omp_sol))\n #Evaluate lq\n self.lqEich = C * self.Psol**Cp * Rgeo**Cr * Bp**Cb * aspect**Ca # in mm\n Bt = abs(ep.BtFunc.ev(ep.g['RmAxis'],ep.g['ZmAxis']))\n if verbose==True:\n print(\"Poloidal Field at midplane: {:f}\".format(Bp))\n print(\"Toroidal Field at axis: {:f}\".format(Bt))\n print(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n log.info(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n return",
"def get_Ec(self):\n return self.Ec",
"def fetch_fuel_data():\n\treturn requests.get('http://www.fueleconomy.gov/ws/rest/fuelprices').text",
"def read_energy(self, fname):\n outfile = open(fname)\n lines = outfile.readlines()\n outfile.close()\n\n energy = None\n for line in lines:\n if line.find('HEAT OF FORMATION') != -1:\n words = line.split()\n energy = float(words[5])\n if line.find('H.o.F. per unit cell') != -1:\n words = line.split()\n energy = float(words[5])\n if line.find('UNABLE TO ACHIEVE SELF-CONSISTENCE') != -1:\n energy = None\n if energy is None:\n raise RuntimeError('MOPAC: could not find total energy')\n### do not change unit for mopac\n energy *= (kcal / mol)\n return energy",
"def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")",
"def get_nonselfconsistent_energies(self, bee_type):\n assert bee_type == 'beefvdw'\n p = os.popen('grep -32 \"BEEF xc energy contributions\" OUTCAR | tail -32','r')\n s = p.readlines()\n p.close()\n xc = np.array([])\n for i, l in enumerate(s):\n l_ = float(l.split(\":\")[-1])\n xc = np.append(xc, l_)\n assert len(xc) == 32\n return xc",
"def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n 
C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 
'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 
0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n 
C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n 
#%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if 
(LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 
0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 
0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- 
Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs",
"def get_energies(data, cencol, save_path = None, eltname = '', **kwargs):\n # element id\n try:\n kalpha = emission[eltname]['ka1']\n kbeta = emission[eltname]['kb']\n except KeyError:\n raise KeyError(\"element identifier not found: \" + eltname)\n\n spectrum = lineout(data, cencol)[::-1]\n frame_dimension = len(spectrum)\n nalpha = np.argmax(spectrum)\n offset = nalpha + 20\n nbeta = np.argmax(spectrum[offset:]) + offset\n\n # calculate position of peak positions on spectrometer\n thalpha = math.asin(e0/kalpha)\n posalpha = 103.4/(math.tan(thalpha))\n thbeta = math.asin(e0/kbeta)\n posbeta = 103.4/(math.tan(thbeta))\n\n # calculate pixel size\n pxsize = (posbeta - posalpha)/(nalpha - nbeta)\n\n # calculate pixel positions\n pixels = range(nalpha,nalpha-frame_dimension,-1)\n pixels = [ posalpha + pxsize*n for n in pixels ]\n\n # calculate Bragg angles and energies for graphite 002\n thetalist = [ math.atan(103.4/p) for p in pixels ]\n elist = [ 1848.303414/(math.sin(theta)) for theta in thetalist ]\n\n nrm = np.sum(spectrum[max(nalpha-40,0):min(nalpha+40,frame_dimension)])\n energies = elist[::-1]\n if save_path:\n if not os.path.dirname(save_path):\n os.system('mkdir -p ' + os.path.dirname(save_path))\n save_calib(save_path, energies)\n return np.array(energies)",
"def B_res_e(f_ece, harm = 1.):\n return me/eV2J*f_ece*np.pi*2./harm",
"def read_uef_details(chunks):\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x0])\n\n\tif pos == None:\n\n\t\toriginator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\toriginator = 'Unknown'\n\telse:\n\t\toriginator = chunk[1]\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x5])\n\n\tif pos == None:\n\n\t\tmachine, keyboard = 'Unknown', 'Unknown'\n\n\telse:\n\n\t\tmachines = ('BBC Model A', 'Electron', 'BBC Model B', 'BBC Master')\n\t\tkeyboards = ('Any layout', 'Physical layout', 'Remapped')\n\n\t\tmachine = ord(chunk[1][0]) & 0x0f\n\t\tkeyboard = (ord(chunk[1][0]) & 0xf0) >> 4\n\n\t\tif machine < len(machines):\n\t\t\tmachine = machines[machine]\n\t\telse:\n\t\t\tmachine = 'Unknown'\n\n\t\tif keyboard < len(keyboards):\n\t\t\tkeyboard = keyboards[keyboard]\n\t\telse:\n\t\t\tkeyboard = 'Unknown'\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0xff00])\n\n\tif pos == None:\n\n\t\temulator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\temulator = 'Unknown'\n\telse:\n\t\temulator = chunk[1]\n\n\n\t# Remove trailing null bytes\n\twhile originator[-1] == '\\000':\n\n\t\toriginator = originator[:-1]\n\n\twhile emulator[-1] == '\\000':\n\n\t\temulator = emulator[:-1]\n\n\tfeatures = ''\n\tif find_next_chunk(chunks, 0, [0x1])[0] != None:\n\t\tfeatures = features + '\\n' + 'Instructions'\n\tif find_next_chunk(chunks, 0, [0x2])[0] != None:\n\t\tfeatures = features + '\\n' + 'Credits'\n\tif find_next_chunk(chunks, 0, [0x3])[0] != None:\n\t\tfeatures = features + '\\n' + 'Inlay'\n\n\treturn originator, machine, keyboard, emulator, features",
"def cep(self):\n return self._cep",
"def grep_fermi(atoms):\n\tlfermi = sub.check_output(\"grep Fermi {}.scf.out\".format(atoms), shell=True).decode(\"utf-8\") # the Fermi energy is 17.4819 ev\n\tE_fermi_re = re.search(r\".*Fermi energy is\\s*([+-]?\\d*\\.?\\d*).*\", lfermi)\n\tE_fermi = float(E_fermi_re.group(1))\n\treturn E_fermi",
"def test_elastic_bug_2():\n test_file = os.path.join(DATA_DIR, '2324.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n assert info['prog'] == '17 1.0.2' # CRYSTAL version\n assert info['finished'] == 2 # finished without errors\n assert info['energy'] == -6.3910338752478E+03 * Ha # energy in eV\n assert info['k'] == '8x8x8' # Monkhorst-Pack net\n assert info['elastic']['K_V'] == -122.44",
"def get_internal_energy(filename):\n # --------------- helper functions --------------- #\n def parse_data(block):\n \"\"\"\n Parse the line(s) to get the data.\n \"\"\"\n rval = {\n 'Total' : None,\n 'Electronic' : None,\n 'Translational' : None,\n 'Rotational' : None,\n 'Vibrational' : None\n }\n for line in block.splitlines():\n if re.match(r'^\\s*Total', line):\n key = 'Total'\n elif re.match(r'^\\s*Electronic', line):\n key = 'Electronic'\n elif re.match(r'^\\s*Translational', line):\n key = 'Translational'\n elif re.match(r'^\\s*Rotational', line):\n key = 'Rotational'\n elif re.match(r'^\\s*Vibrational', line):\n key = 'Vibrational'\n else:\n key = None\n if key:\n words = line.strip().split()\n try:\n rval[key] = float(words[1])\n except ValueError:\n raise ValueError('Invalid thermodynamic format.')\n return rval\n # ------------- end helper functions ------------- #\n # open the file, if a string\n if isinstance(filename, str):\n ifs = open(filename, 'r')\n else:\n ifs = filename\n # extract the relevent lines\n start = r'^\\s*E\\s+\\(Thermal\\)'\n stop = r'^\\s*Vibrational'\n rre = RegexRangeExtractor(start, stop,\n include_start=True,\n include_stop=True)\n block = rre(ifs)[0]\n # close file\n if ifs is not filename:\n ifs.close()\n # parse data\n #+ single value/file\n rval = parse_data(block)\n return rval",
"def absorption_energy_eV(self):\n return self._absorption_energy_eV.copy()",
"def get_E(self):\r\n return self.E",
"def eci(self):\n return self.__eci",
"def _group_energy_terms(ener_xvg):\n with open(ener_xvg) as f:\n all_lines = f.readlines()\n energy_types = [line.split('\"')[1] for line in all_lines if line[:3] == '@ s']\n energy_values = [float(x) * units.kilojoule_per_mole for x in all_lines[-1].split()[1:]]\n e_out = OrderedDict(zip(energy_types, energy_values))\n return e_out, ener_xvg",
"def _read_eeg(eeg_file):\r\n pass",
"def extract_energies(self):\n path2save = 'Analysis/energies.pkl'\n #check, if I have to extract them, or they are already extracted. This the latter case, load them.\n if os.path.exists(path2save):\n print(\"extraction of the polarizaion has already been done. Loading polarizations from from pkl\")\n # TODO delete to check if exists above and do load without doing\n with open('Analysis/energies.pkl', 'rb') as fid:\n [self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols] \\\n = pickle.load(fid)\n else:\n print('Energies are being extracting and will be saved to pkl')\n for i, radius in enumerate(self.radii):\n self.E_sd_plus[radius] = {}\n self.E_sd_0[radius] = {}\n self.E_sd_minus[radius] = {}\n\n self.E_sum_env_plus[radius] = {}\n self.E_sum_env_0[radius] = {}\n self.E_sum_env_minus[radius] = {}\n\n self.V0_plus[radius] = {}\n self.V0_0[radius] = {}\n self.V0_minus[radius] = {}\n\n self.E_env_plus[radius] = {}\n self.E_env_0[radius] = {}\n self.E_env_minus[radius] = {}\n\n self.V_env_plus[radius] = {}\n self.V_env_0[radius] = {}\n self.V_env_minus[radius] = {}\n\n self.n_mols[radius] = {}\n\n for j, core_id in enumerate(self.core_ids):\n #path2file_ip = \\\n # 'Analysis/' + self.dict_radii_folder_IP[radius] + '/Matrix-analysis-IP_' \\\n # + self.mol_name + '-Mol_' + str(core_id) + '_C_1.yml'\n\n path2file_ip = \\\n 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[radius]\\\n + '/Matrix-analysis-IP_' + self.mol_name + '.yml' # new\n path2file_ea = \\\n 'Analysis/EA_by_radius/' + self.dict_radii_folder_EA[radius]\\\n + '/Matrix-analysis-EA_' + self.mol_name + '.yml'\n\n # IP. Charged states: \"+\" and \"0\"\n with open(path2file_ip) as fid:\n ip_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n with open(path2file_ea) as fid:\n ea_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n\n\n # number of mols extraction\n self.n_mols[radius][core_id] = len(ip_dict[int(core_id)]['energies'])\n\n # sd extraction. 
E_sd = E_0 + V_0\n self.E_sd_plus[radius][core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged'] #new\n self.E_sd_0[radius][core_id] = ip_dict[core_id]['energies'][int(core_id)]['total_e_uncharged']\n self.E_sd_minus[radius][core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged']\n # E_0\n self.E0_plus[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n self.E0_0[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_uncharged_vacuum']\n self.E0_minus[core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n # # E_0_vacuum\n # self.E0_plus_vacuum[core_id] =\n # self.E0_0_vacuum[core_id] =\n # self.E0_minus_vacuum[core_id] =\n\n\n # V_0\n self.V0_plus[radius][core_id] = self.E_sd_plus[radius][core_id] - self.E0_plus[core_id]\n self.V0_0[radius][core_id] = self.E_sd_0[radius][core_id] - self.E0_0[core_id]\n self.V0_minus[radius][core_id] = self.E_sd_minus[radius][core_id] - self.E0_minus[core_id]\n\n # E_sum_env = \\sum_i\\ne 0 E_i \\sum_{j=0}^{N} V_{ij}\n ip_env_sub_dict = ip_dict[int(core_id)]['energies']#new\n del ip_env_sub_dict[int(core_id)]\n # del ip_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n\n ea_env_sub_dict = ea_dict[int(core_id)]['energies'] # new\n del ea_env_sub_dict[int(core_id)]\n # del ea_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n # tmp = ip_env_sub_dict['energies'][]\n\n list_total_e_env_plus = [ip_env_sub_dict[env_id]['total_e_charged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_plus[radius][int(core_id)] = np.sum(list_total_e_env_plus) if not list_total_e_env_plus == [] else 0.0\n list_total_e_env_0 = [ip_env_sub_dict[env_id]['total_e_uncharged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_0[radius][int(core_id)] = np.sum(list_total_e_env_0) if not list_total_e_env_0 == [] else 0.0\n list_total_e_env_minus = [ea_env_sub_dict[env_id]['total_e_charged'] for env_id in ea_env_sub_dict]\n self.E_sum_env_minus[radius][int(core_id)] = np.sum(list_total_e_env_minus) if not list_total_e_env_minus == [] else 0.0\n\n # E_env = \\sum_i \\ne 0 E_i. sum of DFT env energies.\n list_vacuum_env_e_plus = [ip_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_plus[radius][int(core_id)] = np.sum(list_vacuum_env_e_plus) if not list_vacuum_env_e_plus == [] else 0.0\n list_vacuum_env_e_0 = [ip_env_sub_dict[env_id]['total_e_uncharged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_0[radius][int(core_id)] = np.sum(list_vacuum_env_e_0) if not list_vacuum_env_e_0 == [] else 0.0\n list_vacuum_env_e_minus = [ea_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ea_env_sub_dict]\n self.E_env_minus[radius][int(core_id)] = np.sum(list_vacuum_env_e_minus) if not list_vacuum_env_e_minus == [] else 0.0\n\n # V_env = 0.5 (\\sum_{i=1} \\sum_{j=1} V_{ij}). classical interaction of env. 
mols\n self.V_env_plus[radius][core_id] = 0.5 * (self.E_sum_env_plus[radius][core_id]\n - self.E_env_plus[radius][core_id]\n - self.V0_plus[radius][core_id])\n\n self.V_env_0[radius][core_id] = 0.5 * (self.E_sum_env_0[radius][core_id]\n - self.E_env_0[radius][core_id]\n - self.V0_0[radius][core_id])\n\n self.V_env_minus[radius][core_id] = 0.5 * (self.E_sum_env_minus[radius][core_id]\n - self.E_env_minus[radius][core_id]\n - self.V0_minus[radius][core_id])\n\n\n append_dict_with_mean(self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.E0_plus, self.E0_0, self.E0_minus,\n self.n_mols) # compute and add \"mean\" to all mentioned dicts\n\n with open('Analysis/energies.pkl', 'wb') as fid:\n pickle.dump([self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols],\n fid)\n print(\"Energies are extracted and dumped to pkl\")",
"def erpac(self):\n return self._erpac",
"def cse_elastic(energy, elements, stoic):\n i = 0\n cs_el = 0\n gamma = float(1.+energy/ElectronMass)\n beta = sqrt(1.-1./(gamma*gamma))\n for Z in elements:\n Z = float(Z)\n cs_el += 1.0e-14*1.4e-6*(Z**1.5)*(1.-(0.26*Z/(137.*beta)))/(beta*beta)*stoic[i]\n i += 1\n\n return cs_el",
"def _get_econt_info(self, out_log):\n f = open_general(out_log)\n tmptxt = f.readlines()\n f.close()\n econt = {}\n itmp = search_string('[read_energy] number of energy points', tmptxt)\n if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])\n itmp = search_string('energies and weights are:', tmptxt)\n if itmp>=0:\n tmp = []\n for ie in range(econt['Nepts']):\n tmpline = tmptxt[itmp+4+ie].split()[1:]\n tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])\n tmp = array(tmp)\n econt['epts'] = tmp[:,:2]\n econt['weights'] = tmp[:,2:]\n econt['emin'] = tmp[0,0]\n return econt",
"def get_exchanged_euros(model):\n exchanged_euros = np.sum([v.exchanged_euros for k, v in model.schedule.agents_by_type['Customer'].items()])\n return round(float(np.sum(exchanged_euros)), 2)",
"def get_E(self):\n return self.E",
"def energy_cal():\n energy_cal = np.load(energy_file)\n return energy_cal"
]
| [
"0.614474",
"0.61137724",
"0.61131305",
"0.58843523",
"0.5829121",
"0.576634",
"0.5709528",
"0.5694991",
"0.5688178",
"0.564943",
"0.56155294",
"0.5608964",
"0.55574423",
"0.554091",
"0.55266964",
"0.55205625",
"0.5486249",
"0.54649156",
"0.53992087",
"0.53921926",
"0.5388455",
"0.5386924",
"0.537741",
"0.53634113",
"0.5359043",
"0.53588855",
"0.5334286",
"0.5320326",
"0.53134644",
"0.53088593"
]
| 0.6570786 | 0 |
return date of next car, according to given schedule. Schedule is considered as a list of Horaire objects that represent when a car (or tram) arrives at a stop. The travel_time input must be a Horaire object. The returned value is the index of the next car in schedule (so, access to schedule[returned_value] gives the Horaire object). | def next_car(schedule, travel_time):
#travel_time = datetime.datetime.strptime(datetime.datetime.strptime(travel_time, '%Y-%m-%d %H:%M:%S').strftime('%H:%M:%S'), '%H:%M:%S')
next_time = None
for index, time in enumerate(schedule):
if time.is_after(travel_time):
next_time = index
break
#time = datetime.datetime.strptime(time, '%H:%M:%S')
## total_seconds is negative if travel_time is upper than time in schedule
## if its possible to get car when arrived at the same time it go to next stop
## just use >= instead of >
#if (time - travel_time).total_seconds() > 0:
##print('SÉÉ:', time.__class__, time)
##print('SÉÉ:', travel_time.__class__, travel_time)
##print(time - travel_time)
##print('AUI:', travel_time - time, (travel_time - time).total_seconds())
##print('RET:', time)
#next_time = index
#break
return 0 if next_time is None else next_time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_next(self,day,link,current_time,route): \n if day not in self.data:\n print('missing day')\n return None\n if link not in self.data[day]:\n print([l for l in self.data[day]])\n print('missing link')\n return None\n if route not in self.data[day][link]:\n print('missing route')\n return None\n import numpy as np\n try:\n #efficent numpy look up.\n index=[np.searchsorted(self.data[day][link][route][0:,0],current_time)]\n except:\n print('broken for this route')\n return None\n if index[0] < len(self.data[day][link][route]):\n return self.data[day][link][route][index[0]]\n else:\n return None",
"def get_next_occurrence(self, start, time):\n d = datetime.datetime.combine(start, time)\n if d < start:\n d += datetime.timedelta(days=1)\n return d.astimezone(self.time_zone)",
"def transit_next(self, vehicle, time):\n events = []\n vehicle.current += 1\n cur_stop = vehicle.route[vehicle.current]\n\n # pickup passengers\n trip_id = vehicle.id.split('_')[0]\n for (end_stop, action) in self.stops[cur_stop['stop_id']][trip_id]:\n logger.debug('[{}] {} Picking up passengers at {}'.format(time, vehicle.id, cur_stop['stop_id']))\n vehicle.passengers[end_stop].append(action)\n self.stops[cur_stop['stop_id']][trip_id] = []\n\n # dropoff passengers\n for action in vehicle.passengers[cur_stop['stop_id']]:\n logger.debug('[{}] {} Dropping off passengers at {}'.format(time, vehicle.id, cur_stop['stop_id']))\n events.extend(action(time))\n vehicle.passengers[cur_stop['stop_id']] = []\n\n try:\n next_stop = vehicle.route[vehicle.current + 1]\n except IndexError:\n # trip is done\n return events\n\n # schedule next leg of trip\n time = next_stop['arr_sec'] - cur_stop['dep_sec']\n events.append((time, partial(self.transit_next, vehicle)))\n return events",
"def test_get_next_competition():\n result = schedule.get_next_competition()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(result['date'], arrow.Arrow), 'date is not a date'",
"def doRide(car, ride):\n global MAX_DISTANCE_START, MAX_DISTANCE_FINISH\n (a, b, x, y, s, f) = ride\n lenght_ride = abs(x - a) + abs(y - b)\n # Simple heuristic to make it faster\n if lenght_ride > MAX_DISTANCE_FINISH: # So it doesn't take too long rides\n return None\n if car is None or len(car) == 0: # No car or no rides asigned to the car\n (cx, cy) = INITIAL_POS\n cs = INITIAL_TIME\n else: # Else, look in the list\n last_ride = car[-1]\n (cx, cy) = tuple(rides[last_ride][0:2]) # Position of the car\n # When will the car be at that position\n cs = rides_done[last_ride][2]\n # Distance to the ride's starting intersection\n distance = abs(cx - a) + abs(cy - b)\n if distance > MAX_DISTANCE_START: # Do not take too far away ones\n return None\n when = max(cs + distance, s)\n if when + lenght_ride > f: # The car cant make it\n return None\n\n return when, when + lenght_ride, when == s",
"def next_schedule(self) -> datetime:\n return next(self._interval)",
"def NextTransit(body, prevTransitTime):\n startTime = prevTransitTime.AddDays(100.0)\n return SearchTransit(body, startTime)",
"def get_next_index(car_position):\n next_index = False\n\n index_car_position = car_position[2]\n next_index_car_position = index_car_position + 1\n next_car_position = self.waypoints_list[\n next_index_car_position]\n\n if next_car_position[1] == car_position[1]:\n if car_position[1] < vehicle_posy < next_car_position[1]:\n next_index = True\n # rospy.logerr(\"check_point1\")\n\n elif next_car_position[0] == car_position[0]:\n if car_position[0] < vehicle_posx < next_car_position[0]:\n next_index = True\n # rospy.logerr(\"check_point2\")\n\n else:\n coeff1 = ((next_car_position[1] - car_position[1]) /\n (next_car_position[0] - car_position[0]))\n\n coeff2 = -(1 / coeff1)\n\n b_low = car_position[1] - (coeff2 * car_position[0])\n b_high = next_car_position[1] - (coeff2 * next_car_position[0])\n\n x_b_p = (self.pose_position.y - b_low) / coeff2\n y_b_p = (coeff2 * self.pose_position.x) + b_low\n\n x_b_np = (self.pose_position.y - b_high) / coeff2\n y_b_np = (coeff2 * self.pose_position.x) + b_high\n\n if x_b_p < vehicle_posx < x_b_np and \\\n y_b_p < vehicle_posy < y_b_np:\n next_index = True\n\n if next_index:\n index_car_position = car_position[2] + 1\n next_index_car_position = index_car_position + 1\n next_car_position = self.waypoints_list[\n next_index_car_position]\n\n return index_car_position",
"def get_next_trading_day_schedule(reference_day: dt):\n reference_day = reference_day.date()\n schedule = get_trading_calendar(reference_day, reference_day)\n while schedule.empty:\n reference_day += timedelta(days=1)\n schedule = get_trading_calendar(reference_day, reference_day)\n return schedule",
"def getNext(self, carPos):\n if carPos.lane != self:\n print \"car is on other lane\"\n return []\n next = []\n shortestDist = sys.maxint\n for car in self.carsPosition.itervalues():\n if car.isGoalFlag:\n next.append(car)\n continue\n if car.position is None:\n print \"the car has no position\"\n continue\n if car.car.id == carPos.car.id:\n continue\n distance = car.position - carPos.position\n if not car.free and (0 < distance < shortestDist): # only pick the cars in front of current car\n shortestDist = distance\n next.append(car)\n return next",
"def route_info(g, journey):\n distance = 0\n cost = 0.00\n time = 0\n check = 0\n \n for i in range(0, len(journey) - 1):\n city_name = journey[i]\n city_next = journey[i + 1]\n code_city = g.convert[city_name] \n code_next = g.convert[city_next]\n \n for flight in g.city_dict[code_city].get_flights_out():\n if(flight[0] == code_next):\n distance = distance + flight[1]\n time = time + route_info_helper(g, code_city, code_next, flight[1])\n if(i < 7):\n cost = cost + (distance * (0.35 - (i * 0.05)))\n \n check = check + 1\n if((check + 1) == len(journey)):\n return distance, cost, time\n else:\n print(\"Invalid Route\")\n return 0, 0, 0",
"def get_next_departure(self,day,link,current_time): \n if link not in self.data[day]:\n return None\n import numpy as np\n index=[np.searchsorted(self.data[day][link][0:,0],current_time)]\n print(len(self.data[day][link]))\n print(index) \n if index[0] < len(self.data[day][link]):\n return self.data[day][link][index[0]]\n else:\n return None",
"def test_get_next_meeting():\n result = schedule.get_next_meeting()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(result['date'], arrow.Arrow), 'date is not a date'",
"def next_run(self):\n for run in self._runs:\n # Because the runs are ordered, look for the first run where\n # stop_time is in the future.\n if run.is_next_run(self._now):\n return run\n # If we arrive here, no next run (today).\n return None",
"def get_next_day(self):\n pass",
"def road_next(self, vehicle, on_arrive, time):\n events = []\n edge = vehicle.current\n if edge is not None:\n # leave previous edge\n edge['occupancy'] -= self.roads.vehicle_size\n if edge['occupancy'] < 0:\n raise Exception('occupancy should be positive')\n vehicle.route.pop(0)\n self.data['road_capacities'][edge['id']].append((float(edge['occupancy']), float(time)))\n\n # compute next leg\n leg = self.road_travel(vehicle.route, vehicle.type)\n\n # random accidents\n if random.random() < config.BASE_ACCIDENT_PROB:\n # sort edges by occupancy\n edges = [(e[:3], e[-1]['occupancy'])\n for e in self.roads.network.edges(keys=True, data=True)\n if e[-1]['occupancy'] > 0]\n if len(edges) > 0:\n edges, probs = zip(*edges)\n edge_idxs = range(len(edges))\n probs = probs/np.sum(probs)\n edge_idx = np.random.choice(edge_idxs, 1, p=probs)[0]\n edge = edges[edge_idx]\n self.roads.network.edges[edge]['accident'] = True\n logger.debug('Accident occurred at edge: {}'.format(edge))\n\n # Accident cleared up event\n clear_time = random.randint(*config.ACCIDENT_CLEAR_TIME)\n events.append((time+clear_time, partial(self.clear_accident, edge)))\n\n # arrived\n if leg is None:\n return events + on_arrive(time)\n\n # TODO replanning can occur here too,\n # e.g. if travel_time exceeds expected travel time\n leg, edge, travel_time = leg\n\n # enter edge\n edge['occupancy'] += self.roads.vehicle_size\n if edge['occupancy'] <= 0:\n raise Exception('adding occupant shouldnt make it 0')\n self.data['road_capacities'][edge['id']].append((float(edge['occupancy']), float(time)))\n\n vehicle.current = edge\n\n # cast to avoid errors with serializing numpy types\n if self.save_history and time >= self.history_window[0] and time <= self.history_window[1]:\n self.history[vehicle.id].append((float(time), float(travel_time), leg))\n\n # return next event\n # TODO this assumes agents don't stop at\n # lights/intersections, so should add in some time,\n # but how much?\n # or will it not affect the model much?\n return events + [(travel_time, partial(self.road_next, vehicle, on_arrive))]",
"def road_travel(self, path, vehicle_type):\n # last node in path\n # is destination\n if len(path) < 2:\n return\n\n leg = path[0]\n if vehicle_type is VehicleType.Public:\n edge = self.transit_roads.network[leg.frm][leg.to][leg.edge_no]\n else:\n edge = self.roads.network[leg.frm][leg.to][leg.edge_no]\n\n # where leg.p is the proportion of the edge we travel\n time = self.roads.router.edge_travel_time(edge) * leg.p\n\n return leg, edge, time",
"def compute_travel_time(start_id, dest_id, csv, G):\n \n # route is not computed yet\n if csv[start_id][dest_id] is None:\n return -1\n\n travel_time = 0\n cur_node_id = start_id\n\n while cur_node_id != dest_id:\n \n # get the next node on this route\n next_node_id = csv[cur_node_id][dest_id]\n\n # update the travel time\n edge = G.get_edge_data(cur_node_id, next_node_id)\n travel_time += edge[0]['travel_time']\n\n cur_node_id = next_node_id\n\n return travel_time",
"def getLinearSchedule(self, schedule, startTime):\n\t\ttime_step = self.time_step\n\t\tvmax = self.maximam_velocity_vector\n\t\ttrips = self.no_of_trips_vector\n\t\tdst = self.dst\n\n\t\tfor fIndex,fItem in enumerate(schedule):\n\t\t\tforwardDirection = True\n\t\t\ttripNo = 1\n\t\t\tfor tIndex, tItem in enumerate(fItem):\n\t\t\t\tposition = (vmax[fIndex] * ((time_step * tIndex) + startTime[fIndex]))\n\t\t\t\trangeStart = game_utility.findRangeStart(position, dst)\n\t\t\t\tif(position > dst and (rangeStart/dst)%2 != 0):\n\t\t\t\t\t# RETURNING FERRY\n\t\t\t\t\tposition = dst - (position - rangeStart)\n\t\t\t\t\tif(forwardDirection):\n\t\t\t\t\t\t#tripNo = tripNo + 1\n\t\t\t\t\t\tforwardDirection = False\n\t\t\t\t\t\t#print(\"return\", position)\n\t\t\t\telif (position > dst and (rangeStart/dst)%2 == 0):\n\t\t\t\t\t# MOVING FORWARD FERRY\n\t\t\t\t\tposition = position - rangeStart;\n\t\t\t\t\tif(not forwardDirection):\n\t\t\t\t\t\ttripNo = tripNo + 1\n\t\t\t\t\t\tforwardDirection = True\n\t\t\t\t\t\t#print(\"forward\", position)\n\t\t\t\t#print(format(max(game_utility.normalize(position, dst), 0.0), '.2f'))\n\t\t\t\t#print(rangeStart)\n\t\t\t\tif(tripNo > trips[fIndex]):\n\t\t\t\t\tposition = 0\n\t\t\t\tschedule[fIndex][tIndex] = format(max(game_utility.normalize(position, dst), 0.0), '.2f')\n\t\treturn schedule",
"def on_bus_arrive(self, road_vehicle, transit_vehicle, time):\n # pick-up/drop-off\n events = self.transit_next(transit_vehicle, time)\n cur_stop = transit_vehicle.route[transit_vehicle.current]\n\n if self.debug:\n last_dep = self.last_deps.get(transit_vehicle.id)\n if last_dep is not None:\n # we compare scheduled vs actual travel times rather than\n # scheduled vs actual arrival times to control for drift.\n # if we compared arrival times, earlier days would accumulate\n # and likely cause all subsequent arrivals to be delayed.\n # comparing travel times avoids this and also\n # lets us better diagnose where the largest travel time\n # discrepancies are.\n last_stop = transit_vehicle.route[transit_vehicle.current-1]\n scheduled_travel_time = cur_stop['arr_sec'] - last_stop['dep_sec']\n actual_travel_time = time - last_dep\n delay = actual_travel_time - scheduled_travel_time\n\n # for calibration, want to take the absolute value\n if abs(delay) > config.ACCEPTABLE_DELAY_MARGIN:\n logger.warn('BUS TRAVEL EXCEPTION: DELAYED {:.2f}min'.format(delay/60))\n self.delays.append(delay)\n\n # prepare to depart\n try:\n next_stop = transit_vehicle.route[transit_vehicle.current + 1]\n time_to_dep = cur_stop['dep_sec'] - cur_stop['arr_sec']\n\n # for tracking travel times\n self.last_deps[transit_vehicle.id] = time + time_to_dep\n except IndexError:\n # trip is done\n return events\n\n # so we don't double-leave the current edge\n # otherwise we get negative occupancies in roads\n road_vehicle.current = None\n\n # setup on arrive trigger\n on_arrive = partial(self.on_bus_arrive, road_vehicle, transit_vehicle)\n\n # figure out road route\n try:\n start, end = cur_stop['stop_id'], next_stop['stop_id']\n if self.cache_routes and (start, end) in self.route_cache:\n route = self.route_cache[(start, end)][:]\n else:\n route = self.transit_roads.route_bus(start, end)\n self.route_cache[(start, end)] = route[:]\n\n # update route\n road_vehicle.route = route\n\n # override last event\n events[-1] = (time_to_dep, partial(self.road_next, road_vehicle, on_arrive))\n except NoRoadRouteFound:\n # this seems to occur if the GTFS bus stop lat/lons\n # are inaccurate, so when we map the stop position to\n # a road network position, it maps incorrectly.\n\n # get inferred stop position on road network\n if self.debug:\n start_pt = (\n self.transit_roads.stops[start].pt.x,\n self.transit_roads.stops[start].pt.y)\n end_pt = (\n self.transit_roads.stops[end].pt.x,\n self.transit_roads.stops[end].pt.y)\n self.road_route_failures.add(((start_pt, end_pt), (start, end)))\n logger.warn('Ignoring no road route found! (STOP{} -> STOP{}) Falling back to bus schedule.'.format(start, end))\n\n # as a fallback, just assume the bus arrives on time\n # this is really not ideal, because we pull the bus out of traffic\n # and so it becomes unaffected by congestion, and can't participate\n # in congestion\n scheduled_travel_time = next_stop['arr_sec'] - cur_stop['dep_sec']\n events[-1] = (time_to_dep, lambda time: [(scheduled_travel_time, on_arrive)])\n\n return events",
"def next_move(self):\n\n # Calculate all paths to destination from current location and time.\n solution = self.calculate_best_solution((None, None), self.currentTurn, [self.character.path[-1]],\n self.character.spent)\n\n # Add travel weight to spent.\n if solution[1] is not None and solution[1][0] != solution[1][1]:\n self.character.spent += self.pekingMap.get_vertex(solution[1][0]).weight(solution[1][1])\n\n # Return next point in shortest path to location.\n if solution[1] is not None:\n return solution[1][1]\n\n return None",
"def fetch_next_match() -> Optional[MatchDict]:\n future_matches = Match.objects.filter(start_date_time__gt=timezone.now())\n\n if not any(future_matches):\n return None\n\n next_match = min(future_matches, key=lambda match: match.start_date_time)\n\n return {\n \"round_number\": next_match.round_number,\n \"season\": next_match.start_date_time.year,\n }",
"def blablacar_journey(df_response, departure_date, start_point, end_point):\n\n lst_journeys = list()\n # all itineraries :\n # print(f'nb itinerary : {df_response.id_global.nunique()}')\n _id = 0\n for trip_id in df_response.trip_id.unique():\n itinerary = df_response[df_response.trip_id == trip_id]\n # Get the arrival info on the same line\n itinerary['date_time_arrival'] = itinerary.date_time.shift(-1)\n itinerary['city_arrival'] = itinerary.city.shift(-1)\n itinerary['address_arrival'] = itinerary.address.shift(-1)\n itinerary['latitude_arrival'] = itinerary.latitude.shift(-1)\n itinerary['longitude_arrival'] = itinerary.longitude.shift(-1)\n\n # boolean to know whether and when there will be a transfer after the leg\n itinerary['next_departure'] = itinerary.date_time.shift(1)\n\n # Get rid of the \"last line\" for the last leg of the blablacar trip\n itinerary = itinerary[~pd.isna(itinerary.city_arrival)]\n\n # Divide price between legs weighted by distance and distance\n itinerary['total_distance'] = itinerary.distance_in_meters.sum()\n itinerary['price'] = float(itinerary['price'])\n itinerary['price_leg'] = itinerary.apply(lambda x: x.distance_in_meters / x.total_distance * x.price, axis=1)\n\n i = _id\n lst_sections = list()\n # We add a waiting period at the pick up point of 15 minutes\n #print(itinerary.date_time.get_values())\n #print(type(itinerary.date_time.get_value(0)))\n #print(type(timedelta(seconds=_BLABLACAR_WAITING_PERIOD)))\n #print(itinerary.date_time.get_value(0)-timedelta(seconds=_BLABLACAR_WAITING_PERIOD))\n step = tmw.Journey_step(i,\n _type=constants.TYPE_WAIT,\n label=f'Arrive at pick up point {format_timespan(_BLABLACAR_WAITING_PERIOD)} before departure',\n distance_m=0,\n duration_s=_BLABLACAR_WAITING_PERIOD,\n price_EUR=[0],\n gCO2=0,\n departure_point=[itinerary.latitude.iloc[0], itinerary.longitude.iloc[0]],\n arrival_point=[itinerary.latitude.iloc[0], itinerary.longitude.iloc[0]],\n departure_date=itinerary.date_time.iat[0] - timedelta(seconds=_BLABLACAR_WAITING_PERIOD),\n arrival_date=itinerary.date_time.iat[0],\n bike_friendly=True,\n geojson=[],\n )\n\n lst_sections.append(step)\n i = i + 1\n # Go through all steps of the journey\n for index, leg in itinerary.iterrows():\n local_distance_m = leg.distance_in_meters\n local_transportation_type = constants.TYPE_CAR\n local_emissions = co2_emissions.calculate_co2_emissions(local_transportation_type, constants.DEFAULT_CITY,\n constants.DEFAULT_FUEL, constants.DEFAULT_NB_SEATS,\n constants.DEFAULT_NB_KM) * \\\n constants.DEFAULT_NB_PASSENGERS * local_distance_m\n step = tmw.Journey_step(i,\n _type=constants.TYPE_CARPOOOLING,\n label=f'BlablaCar trip from {leg.city} to {leg.city_arrival}',\n distance_m=local_distance_m,\n duration_s=leg.duration_in_seconds,\n price_EUR=[leg.price_leg],\n gCO2=local_emissions,\n departure_point=[leg.latitude, leg.longitude],\n arrival_point=[leg.latitude_arrival, leg.longitude_arrival],\n departure_stop_name=leg.address + ' ' + leg.city,\n arrival_stop_name=leg.address_arrival + ' ' + leg.city_arrival,\n departure_date=leg.date_time,\n arrival_date=leg.date_time_arrival,\n trip_code='BlaBlaCar_' + str(leg.trip_id),\n bike_friendly=False,\n geojson=[],\n )\n lst_sections.append(step)\n i = i + 1\n # add transfer steps\n if not pd.isna(leg.next_departure):\n step = tmw.Journey_step(i,\n _type=constants.TYPE_TRANSFER,\n label=f'Transfer at {leg.name_arrival_seg}',\n distance_m=0,\n duration_s=(leg['next_departure'] - leg['arrival_date_seg']).seconds,\n price_EUR=[0],\n 
departure_point=[leg.latitude_arrival, leg.longitude_arrival],\n arrival_point=[leg.latitude_arrival, leg.longitude_arrival],\n departure_stop_name=leg.address_arrival + ' ' + leg.city_arrival,\n arrival_stop_name=leg.address_arrival + ' ' + leg.city_arrival,\n departure_date=leg.date_time_arrival,\n arrival_date=leg.next_departure,\n gCO2=0,\n bike_friendly=False,\n geojson=[],\n )\n lst_sections.append(step)\n i = i + 1\n journey_blablacar = tmw.Journey(_id, steps=lst_sections,\n departure_date=lst_sections[0].departure_date,\n arrival_date=lst_sections[-1].arrival_date,\n booking_link=leg.link)\n # Add category\n category_journey = list()\n for step in journey_blablacar.steps:\n if step.type not in [constants.TYPE_TRANSFER, constants.TYPE_WAIT]:\n category_journey.append(step.type)\n\n journey_blablacar.category = list(set(category_journey))\n lst_journeys.append(journey_blablacar)\n\n # for journey in lst_journeys:\n # journey.update()\n\n return lst_journeys",
"def test_get_next_workshop():\n result = schedule.get_next_workshop()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(result['date'], arrow.Arrow), 'date is not a date'",
"def get_next_game(today_game_date: datetime, team_id: int) -> dict:\n\n game_date = today_game_date.strftime(\"%Y-%m-%d\")\n tomorrow = (today_game_date + timedelta(days=1)).strftime(\"%Y-%m-%d\")\n end_date = (today_game_date + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n\n logging.info(\"Checking the schedule API endpoint for the next game.\")\n url = f\"schedule?teamId={team_id}&startDate={game_date}&endDate={end_date}\"\n\n response = api.nhl_api(url)\n if not response:\n return None\n\n next_game_json = response.json()\n next_game = next_game_json.get(\"dates\")[1].get(\"games\")[0]\n\n return next_game",
"def get_next_travel_edge(self, timestamp: float) -> Tuple[int, int]:\n if not self.uses_atis():\n return (self.traveled_nodes[-1][self.NODE_INDEX],\n self.base_route[len(self.traveled_nodes)])\n else:\n return self.atis.get_edge_prediction(\n self.traveled_nodes[-1][self.NODE_INDEX],\n self.base_route[-1],\n timestamp\n )",
"def test_get_next_n_schedule(self):\n expected_list = [datetime.datetime(2021, 8, 7, 8, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 46, tzinfo=datetime.timezone.utc)]\n\n from_dt = datetime.datetime(2021, 8, 7, 8, 30, 57, tzinfo=datetime.timezone.utc)\n result = AWSCron.get_next_n_schedule(10, from_dt, '0/23 * * * ? *')\n self.assertEqual(str(expected_list), str(result))",
"def getNextObject(dframe, obj, ctime):\n df = dframe[(dframe['Object']==obj) & (dframe['Date'] > ctime)]\n return df.sort_values('Date', ignore_index = True).iloc[0]",
"def schedule(self) -> pulumi.Input['CanaryScheduleArgs']:\n return pulumi.get(self, \"schedule\")",
"def succ_car_other_lane(self):\n other_lane = self.other_lane()\n\n for i in range(self.road.N):\n if self.road.cells[other_lane][(self.x - (i + 1)) % (self.road.N-1)] != -1:\n return self.road.cars[self.road.cells[other_lane][(self.x - (i + 1)) % (self.road.N-1)]]"
]
| [
"0.56140107",
"0.5598898",
"0.5507337",
"0.5356615",
"0.5313905",
"0.5231977",
"0.5178537",
"0.51480544",
"0.5140548",
"0.5081775",
"0.49440774",
"0.4889892",
"0.48250175",
"0.47803727",
"0.4715545",
"0.47006294",
"0.463806",
"0.46155107",
"0.4605269",
"0.46029308",
"0.45838028",
"0.45714718",
"0.45625544",
"0.4546691",
"0.45251262",
"0.45091358",
"0.44690296",
"0.44686967",
"0.44555366",
"0.44407788"
]
| 0.80938816 | 0 |
return the dir URL | def get_dirurl(self, dirpath):
paths = dirpath.split("/")
return "/".join(paths[paths.index("static"):]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Directory(self) -> str:",
"def _get_dir_url(endpoint, path, **kwargs):\n if not path:\n return url_for(endpoint)\n else:\n #if self._on_windows:\n # path = path.replace('\\\\', '/')\n\n kwargs['path'] = path\n\n return url_for(endpoint, **kwargs)",
"def dir(self) -> str:\n return f'{os.path.dirname(self.path)}/'.lstrip('/')",
"def dirname(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4]",
"def download_dir(self) -> str:\n return self._download_dir",
"def get_directory() -> str:\n return directory",
"def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename",
"def direct_url(self):\n #return '%s/getDownloadableFile' % self.absolute_url()\n return self.context.absolute_url()",
"def dirpath(self) -> str:\n return self._dirpath",
"def get_dir(self):\n return self.dir",
"def get_current_directory_uri(self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_dir_path():\n return DIR_PATH",
"def __url(self, object):\n return '/'.join(object.getPhysicalPath())",
"def getDir( self ):\n return self.dir",
"def url(self):\n if not os.path.exists(self.path):\n self.save()\n return self.uset.url(os.path.join(self.folder, self.get_filename()))",
"def __str__(self):\n url = '{}/{}'.format(self.root, self.path)\n return url",
"def directory(self) -> str:\n return self._values.get(\"directory\")",
"def _get_download_dir(self):\n return self.data['info']['root_downloads_dir']",
"def _get_download_dir(self):\n return self.manager.download_dir",
"def get_downloads_dir():\n return _DOWNLOADS_DIR",
"def _get_url(self, absolute):",
"def public_upload_dir_rel(self):\n return os.path.join(self.short_name,settings.COMIC_PUBLIC_FOLDER_NAME)",
"def listdir(self):\n if self._isurl(self._baseurl):\n raise NotImplementedError(\n \"Directory listing of URLs, not supported yet.\")\n else:\n return os.listdir(self._baseurl)",
"def Url(self) -> str:",
"def url(self) -> str:\n return f\"{self._get_base_url()}{self.path_extension}\"",
"def get_directory(self):\n return self.directory",
"def dir_name(self):\n return self._dir",
"def url(self):\n scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)\n url = six.moves.urllib.parse.urlunsplit((\n scheme, netloc, path + '.dods',\n self.id + hyperslab(self.slice) + '&' +\n '&'.join(self.selection), fragment)).rstrip('&')\n\n return url",
"def _remote_path(self):\n return self._remote_dir",
"def proxy_dir(self):\n return self.__get_option('proxy_dir')"
]
| [
"0.7273775",
"0.7153738",
"0.6965254",
"0.6904788",
"0.68993217",
"0.6853848",
"0.6754187",
"0.6738337",
"0.6738171",
"0.6716691",
"0.6685926",
"0.6655906",
"0.66186196",
"0.65818834",
"0.6554798",
"0.65474826",
"0.6482278",
"0.64310545",
"0.64290816",
"0.64277285",
"0.64126253",
"0.6394287",
"0.63915884",
"0.63915545",
"0.63675123",
"0.6346804",
"0.6346193",
"0.63374835",
"0.6331902",
"0.6325937"
]
| 0.7773397 | 0 |
filter to return font files only | def filter_fontfiles(self, filenames, d=dict()):
for f in filenames:
n, ext = os.path.splitext(f)
# skip for the files that are not supported
if not ext in SUPPORTED_FONTS: continue
d[n] = d[n] + [ext] if d.get(n) else [ext]
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GetFontFiles(path):\n return [f for f in listdir(path)\n if os.path.splitext(f)[1] in ('.ttf', '.otf')]",
"def load_all_fonts(directory, accept=(\".ttf\",)):\n return load_all_music(directory, accept)",
"def load_all_fonts(directory, accept=(\".ttf\",)):\n return load_all_music(directory, accept)",
"def available_text_fonts():\n bad = [u'acalc',\n u'acb',\n u'aco',\n u'acp']\n all = available_fonts()\n fonts = []\n for f in all:\n if (f == u'Series 60 ZDigi'):\n continue\n for b in bad:\n try:\n if (f.lower().startswith(b) and f[len(b)].isdigit()):\n break\n except IndexError:\n pass\n else:\n fonts.append(f)\n\n\n\n def compare(a, b):\n return -(a.lower() < b.lower())\n\n\n fonts.sort(compare)\n return fonts",
"def get_fonts(self):\n\n font_path = self.execute_shell([\"figlet\", \"-I2\"])\n\n # get the font files installed in font_path,\n # and clean them up for printing\n fonts = [os.path.split(x)[1].split(\".\")[0] \\\n for x in self.execute_shell([\"find\",\n font_path, \"-iname\", \"*.flf\"]).split(\"\\n\")]\n\n return fonts",
"def process_fonts():\n fonts_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/fonts/')\n static_fonts = os.path.join(settings.BASE_DIR, 'static/CMESH/fonts/')\n\n copy_files(fonts_path, static_fonts)",
"def getfonts(self):\n return self.vffile.getfonts()",
"def test_filter_glyph_list(tmpdir):\n path = \"%s/dummy/font.ufo\" % DATA_DIR\n out = str(tmpdir / basename(path)) + \".out\"\n\n psautohint([path, '-o', out, '-g', 'FOO,BAR,a'])",
"def get_fonts(folder=None):\n fonts = {}\n if folder:\n cmd = ['fc-scan', '--format', '\"%{file}:%{family}:style=%{style}\\n\"', folder]\n else:\n cmd = ['fc-list', ':', 'file', 'family', 'style']\n for line in subprocess.check_output(cmd).decode('utf-8').split(\"\\n\"):\n logger.debug(line)\n line.strip()\n if not line: continue\n if 'otf' not in line and 'ttf' not in line: continue\n parts = line.split(':')\n path = parts[0]\n families = parts[1].strip().split(',')\n styles = parts[2].split('=')[1].split(',')\n if len(families) == 1 and len(styles) > 1:\n families = [families[0]] * len(styles)\n elif len(families) > 1 and len(styles) == 1:\n styles = [styles[0]] * len(families)\n if len(families) != len(styles):\n logger.debug(\"Problem with this font: \" + line)\n continue\n for i in range(len(families)):\n try: fonts[families[i]]\n except: fonts[families[i]] = dict()\n fonts[families[i]][styles[i]] = path\n logger.debug(\"Added this font: \" + str((families[i], styles[i], path)))\n return fonts",
"def _styles(self):\n for filename in os.listdir(self.dirpath):\n name, ext = os.path.splitext(filename.lower())\n if ext in self.EXTENSIONS and name != self.DEFAULT:\n yield name",
"def filter_target_extensions(self, files_dict):\n files_filtered = defaultdict(list)\n supported_formats = self.sox_get_supported_formats()\n logging.info('Filtering audio files ...')\n paths = list(files_dict.keys())\n\n for path in paths:\n if not path.endswith('letmehear'):\n files = sorted(files_dict[path])\n for f in files:\n if os.path.splitext(f)[1].lstrip('.').lower() in supported_formats:\n files_filtered[path].append(f)\n return files_filtered",
"def file_filter(file_name):\n extensions = get_setting('file_extensions', [])\n if not extensions: return True\n return True in [file_name.endswith(ext) for ext in extensions]",
"def strip_eps_font(filename):\r\n inf = open(filename)\r\n filecache = []\r\n in_ttf = False\r\n for line in inf:\r\n if \"Bitstream\" in line:\r\n line = line.replace(\"BitstreamVeraSans-Roman\", \"Arial\")\r\n if line.startswith(\"\"\"%%BeginFont\"\"\"):\r\n in_ttf = True\r\n if line.startswith(\"\"\"%%EndFont\"\"\"):\r\n in_ttf = False\r\n continue\r\n if in_ttf:\r\n continue\r\n else:\r\n filecache.append(line)\r\n\r\n inf.close()\r\n ouf = open(filename, \"w+\")\r\n ouf.write(''.join(filecache))\r\n ouf.close()",
"def search(self):\n files = os.listdir(self.filePath)\n txt_file = []\n for f in files:\n f_ext = f.split('.')[-1]\n if f_ext == self.flag:\n if self.flag == 'txt':\n txt_file.append(FileTxt(os.sep.join([self.filePath, f])))\n\n if self.flag == 'csv':\n txt_file.append(FileCsv(os.sep.join([self.filePath, f])))\n\n return txt_file",
"def fontforge_skip_checks():\n return None",
"def fontforge_skip_checks():\n return None",
"def filter_files(self, pattern, filter_fn=None):\n def filter_function(f):\n return re.search(pattern, f) != None\n if not filter_fn:\n filter_fn = filter_function\n return filter(filter_fn, self.files)",
"def getAvailableFonts():\n return list(AVAILABLE_FONTS)",
"def get_fonts():\r\n return pygame.font.get_fonts()",
"def get_font_filters(self) -> list[FontFilterStr]:\n return FONT_FILTERS.get_list(self.fontFilters())",
"def _recursivelyCollectFontPaths(path, fontPaths):\n for fileName in os.listdir(path):\n filePath = path + '/' + fileName\n if os.path.isdir(filePath):\n _recursivelyCollectFontPaths(filePath, fontPaths)\n else:\n extension = fileName.split('.')[-1].lower()\n if extension in ('ttf', 'otf', 'svg'):\n fontPaths[fileName] = filePath",
"def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]",
"def test_check_more_than_one_fontName(self):\n fonts = []\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if isinstance(font,list):\n result = font\n break\n #here we are checking if fonts in pdf-doc contain a font-name list\n self.assertIsInstance(result,list)",
"def get_system_fonts():\n fonts = set()\n for x in font_manager.findSystemFonts():\n dot = x.rfind('.')\n slash = x.rfind(sep)\n x = x[slash + 1:dot]\n fonts.add(x)\n return sorted(fonts)",
"def openTTFonts(path):\n\tfrom fontTools import ttLib\n\tfonts = []\n\tsfnts = getSFNTResIndices(path)\n\tif not sfnts:\n\t\tfonts.append(ttLib.TTFont(path))\n\telse:\n\t\tfor index in sfnts:\n\t\t\tfonts.append(ttLib.TTFont(path, index))\n\t\tif not fonts:\n\t\t\traise ttLib.TTLibError(\"no fonts found in file '%s'\" % path)\n\treturn fonts",
"def test_font_on_disk_family_equal_in_metadata_json(self):\n contents = self.read_metadata_contents()\n metadata = Metadata.get_family_metadata(contents)\n\n unmatched_fonts = []\n for font_metadata in metadata.fonts:\n try:\n font = Font.get_ttfont_from_metadata(self.operator.path,\n font_metadata)\n except IOError:\n continue\n if font.familyname != font_metadata.name:\n unmatched_fonts.append(font_metadata.filename)\n\n if unmatched_fonts:\n msg = 'Unmatched family name are in fonts: {}'\n self.fail(msg.format(', '.join(unmatched_fonts)))",
"def SupportedFiletypes( self ):\n return ['plaintex', 'tex']",
"def SupportedFiletypes( self ):\n return ['plaintex', 'tex']",
"def test_metadata_regular_is_normal(self):\n have = False\n for x in self.metadata.fonts:\n if x.full_name.endswith('Regular') and x.style == 'normal':\n have = True\n self.assertTrue(have)",
"def filter_files(path, string):\n try:\n listing = os.listdir(path)\n return [f for f in listing if string in f]\n except:\n raise ValueError(\"Error in upy.contrib.tree.menu @ filter_files()\")"
]
| [
"0.73781276",
"0.6627336",
"0.6627336",
"0.6314944",
"0.62431806",
"0.6171882",
"0.610995",
"0.6020973",
"0.59870565",
"0.5923361",
"0.5881843",
"0.58167106",
"0.5775011",
"0.5754809",
"0.57511044",
"0.57511044",
"0.57372826",
"0.57291114",
"0.5669524",
"0.56519204",
"0.56480056",
"0.5643635",
"0.56061715",
"0.5602047",
"0.5599182",
"0.5568051",
"0.55182296",
"0.55182296",
"0.55022013",
"0.5499713"
]
| 0.70424294 | 1 |
Tests the home page. | def test_home(self):
response = self.client.get('/')
self.assertContains(response, 'Home Page', 1, 200) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_home(self):\n\t\tresponse = self.client.get('/')\n\t\tself.assertContains(response, 'Home Page', 1, 200)",
"def test_home_page(self):\r\n url = reverse('home')\r\n response = self.client.get(url)\r\n\r\n self.assertEqual(response.status_code, 200)",
"def test_home_page(self):\n\n self.browser.get('http://localhost:8000/index.html')\n\n # there is a page title defined by <title></title> on the home page\n # check it\n\n self.assertIn('Stability within Movement',self.browser.title)\n\n # You will have an image for your home page I am assuming.\n # Put the name of your image here in place of homebrew.png\n # In general this is how we check for images on a page.\n\n # The user sees an image of sun hitting the Washington Monument\n\n m=self.browser.find_element_by_tag_name('img')\n self.assertIn('help.jpg',m.get_attribute('src'))\n\n a=self.browser.find_element_by_id('sun')\n a.click()\n\n self.assertIn('sun',self.browser.title)\n\n h=self.browser.find_element_by_tag_name('h1')\n\n m=self.browser.find_element_by_tag_name('img')\n\n # the user goes back to the home page\n # self.browser.back()\n self.browser.get('http://localhost:8000/index.html')\n\n # the user sees at the bottom of the page a link to credits\n l=self.browser.find_element_by_link_text('Credits')\n\n # the user clicks on the credits link\n l.click()\n # and sees the credits.html page\n a=self.browser.current_url\n self.assertIn(\"credits.html\",a)",
"def test_home(self):\n\n response = self.client.get(reverse('home'))\n\n assert response.status_code == 200",
"def test_home(self):\n response = self.app.get(\"/\")\n self.assertTrue(response.status_code, 200)",
"def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)",
"def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)",
"def test_homepage(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Homepage\", result.data)",
"def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data",
"def test_home(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)",
"def test_home(self):\n response = self.client.get('/')\n self.assert_200(response)\n self.assert_template_used('index.html')",
"def test_given_home_page_behavior(self):\n res = self.client().get('/')\n self.assertEqual(res.status_code, 200)\n json_res = json.loads(res.get_data(as_text=True))\n self.assertEqual('Home page', json_res['message'])",
"def test_homepage(self):\r\n\r\n result = self.client.get(\"/\")\r\n self.assertIn(b\"Welcome!\", result.data)",
"def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data",
"def test_home(self):\n\n with self.client:\n result = self.client.get('/users')\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"col-2\">Users</h1>', result.data)",
"def test_home_exists(self):\n response = self.app.get('/')\n self.assertEqual(response.status_code, 200)",
"def test_homepage(self):\n \n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"What type of user are you?\", result.data)",
"def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)",
"def test_home_content(self):\n bs = self.get_soup(baseUrl)\n self.assertOneExists(bs, \"#page_discover\")",
"def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))",
"def test_template_home(self):\n self.assertTemplateUsed(self.response, 'index.html')",
"def test_index(self):\n response = self.client.get('')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home/index.html')",
"def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)",
"def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200",
"def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)",
"def test_home(client):\n rv = client.get('/')\n assert 200 == rv.status_code",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_view_home(self):\n testUser = User.objects.create_user(username=\"testUser\", email = \"[email protected]\", password=\"testPassword\")\n uA = create_user(user=testUser, first_name=\"John\", last_name=\"Doe\", major='', bio='')\n login = self.client.force_login(testUser)\n url = reverse('login:home')\n response = self.client.get(url, follow=True)\n self.assertContains(response, \"Are you ready\")",
"def test_home(self):\n res = self.client.get(\"/\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Gandalf\" in data",
"def test_01_index(self):\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n assert self.html_title() in res.data, res\r\n assert \"Create an App\" in res.data, res"
]
| [
"0.86721593",
"0.83283335",
"0.8285781",
"0.82591826",
"0.82452273",
"0.82392025",
"0.82392025",
"0.8199806",
"0.8114395",
"0.8091571",
"0.8031986",
"0.7946267",
"0.79409236",
"0.79266375",
"0.7866116",
"0.7856337",
"0.7807612",
"0.7719456",
"0.7685034",
"0.7626273",
"0.7593646",
"0.7538487",
"0.75284004",
"0.7514524",
"0.7510136",
"0.75043875",
"0.74783105",
"0.7472915",
"0.7420776",
"0.7376298"
]
| 0.85700876 | 1 |
Return callback for LoadLibraryA | def LoadLibraryA_rtn_handler(exec_ctx):
logging.info("kernel32.dll.LoadLibraryA returned 0x%08x" % \
exec_ctx.regs.EAX)
# TODO: check / update list of hooks also
pybox.MODULES.update()
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_library():\n # return the handle to the shared object\n if os.name == \"nt\":\n pass\n # libc = load_windows_dll()\n else:\n libc = load_linux_so()\n return libc",
"def _load_libs():\n if sys.platform == \"darwin\":\n lib_name = \"libtiledbvcf.dylib\"\n elif sys.platform == \"win32\":\n lib_name = \"tiledbvcf.dll\"\n else:\n lib_name = \"libtiledbvcf.so\"\n\n try:\n # Try loading the bundled native library.\n lib_dir = os.path.dirname(os.path.abspath(__file__))\n ctypes.CDLL(os.path.join(lib_dir, lib_name))\n except OSError as e:\n # Otherwise try loading by name only.\n ctypes.CDLL(lib_name)",
"def init():\n \n # ensure that required libraries are loaded\n LoadLibrary = ctypes.windll.kernel32.LoadLibraryA\n LoadLibrary(\"kernel32.dll\")\n\n hooks = [ (\"kernel32.dll\", \"LoadLibraryA\", LoadLibraryA_handler),\n (\"kernel32.dll\", \"GetProcAddress\", GetProcAddress_handler),\n (\"kernel32.dll\", \"malloc\", malloc_handler),\n (\"kernel32.dll\", \"memset\", memset_handler),\n (\"kernel32.dll\", \"ReadProcessMemory\", ReadProcessMemory_handler),\n (\"kernel32.dll\", \"VirtualAllocEx\", VirtualAllocEx_handler),\n (\"kernel32.dll\", \"WriteProcessMemory\", \\\n WriteProcessMemory_handler),\n ]\n\n for (dll_name, func_name, handler) in hooks:\n if not pybox.register_hook(dll_name,\n func_name,\n handler):\n logging.error(\"Failed to register hook for %s\" % func_name)\n \n return",
"def __load_lib(self):\n if self.__databroker:\n return\n\n import platform\n # is_64bit = platform.architecture()[0] == '64bit'\n is_64bit = False\n\n import os\n dll_path = os.path.join(os.path.dirname(__file__), 'dlls')\n lib_path = os.path.join(os.path.dirname(__file__), 'libs')\n cur_wd = os.getcwd()\n try:\n if utility.is_windows():\n info('The platform is WINDOWS.')\n os.chdir(dll_path)\n self.__databroker = ctypes.windll.LoadLibrary('databroker.dll')\n os.chdir(cur_wd)\n elif utility.is_linux():\n info('The platform is LINUX')\n ctypes.CDLL(lib_path+'/libboost_chrono.so.1.59.0', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libPBEngine.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libosisolate.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libencrypt_md5.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libcommon.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libnetutility.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libsipua.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/librtprtcp.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libsdpdecoder.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libencrypt_vivostrcodec.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libActiveDirectoryClient.so', mode=ctypes.RTLD_GLOBAL)\n ctypes.CDLL(lib_path+'/libntlm.so', mode=ctypes.RTLD_GLOBAL)\n if is_64bit is True:\n self.__databroker = ctypes.CDLL(lib_path+'/libDataBroker.so', mode=ctypes.RTLD_GLOBAL)\n else:\n self.__databroker = ctypes.CDLL(lib_path+'/libDataBroker.so', mode=ctypes.RTLD_GLOBAL)\n else:\n raise PlatformError('Do not support the system!')\n except Exception as e:\n print('%s' % e)\n raise LoadDBRKLibError(\"Load DataBroker Failed!\")",
"def init_functions(dll):\n # Asap3Init\n dll.Asap3Init.argtypes = (\n ctypes.POINTER(TAsap3Hdl),\n ctypes.c_ulong,\n ctypes.c_char_p,\n ctypes.c_ulong,\n ctypes.c_bool,\n )\n dll.Asap3Init.restype = ctypes.c_bool\n\n # Asap3Init2\n dll.Asap3Init2.argtypes = (\n ctypes.POINTER(TAsap3Hdl),\n ctypes.c_ulong,\n ctypes.c_char_p,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.c_bool,\n )\n dll.Asap3Init2.restype = ctypes.c_bool\n\n # Asap3Init3\n dll.Asap3Init3.argtypes = (\n ctypes.POINTER(TAsap3Hdl),\n ctypes.c_ulong,\n ctypes.c_char_p,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.c_bool,\n ctypes.c_bool,\n )\n dll.Asap3Init3.restype = ctypes.c_bool\n\n # Asap3Init4\n dll.Asap3Init4.argtypes = (\n ctypes.POINTER(TAsap3Hdl),\n ctypes.c_ulong,\n ctypes.c_char_p,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.c_bool,\n ctypes.c_bool,\n ctypes.c_bool,\n )\n dll.Asap3Init4.restype = ctypes.c_bool\n\n # Asap3Init5\n dll.Asap3Init5.argtypes = (\n ctypes.POINTER(TAsap3Hdl),\n ctypes.c_ulong,\n ctypes.c_char_p,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.c_bool,\n ctypes.c_bool,\n ctypes.c_bool,\n ctypes.c_bool,\n )\n dll.Asap3Init5.restype = ctypes.c_bool\n\n # Asap3Init6\n dll.Asap3Init6.argtypes = (\n ctypes.POINTER(TAsap3Hdl),\n ctypes.c_ulong,\n ctypes.c_char_p,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.c_bool,\n ctypes.c_bool,\n ctypes.c_bool,\n ctypes.c_bool,\n ctypes.POINTER(TApplicationID),\n )\n dll.Asap3Init6.restype = ctypes.c_bool\n return dll",
"def _ll_cb(code, event_code, data_ptr):\n return self._ll_hdl(code, event_code, data_ptr)",
"def callback(fun):\n return ffi.callback(_callback_type, fun)",
"def load():\n if idaapi.get_root_filename() is None:\n # No idb open yet\n def handler(event, old=0):\n if event == idaapi.NW_OPENIDB:\n _do_load()\n elif event == idaapi.NW_TERMIDA:\n idaapi.notify_when(idaapi.NW_TERMIDA | idaapi.NW_OPENIDB | idaapi.NW_REMOVE, handler)\n def _install():\n idaapi.notify_when(idaapi.NW_TERMIDA | idaapi.NW_OPENIDB, handler)\n # return -1 to remove the timer\n return -1\n # It's possible we can't use the notify_when API call yet when IDA opens\n # so try register a timer to add the event listner in the proper \"state\"\n idaapi.register_timer(1, _install)\n else:\n # IDA is fully loaded and an idb is open, just load the plugin.\n _do_load()",
"def load(self,path):\n try:\n # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead\n # of the default RTLD_LOCAL. Without this, you end up with\n # libraries not being loadable, resulting in \"Symbol not found\"\n # errors\n if sys.platform == 'darwin':\n return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)\n else:\n return ctypes.cdll.LoadLibrary(path)\n except OSError,e:\n raise ImportError(e)",
"def load(self,path):\n try:\n # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead\n # of the default RTLD_LOCAL. Without this, you end up with\n # libraries not being loadable, resulting in \"Symbol not found\"\n # errors\n if sys.platform == 'darwin':\n return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)\n else:\n return ctypes.cdll.LoadLibrary(path)\n except OSError,e:\n raise ImportError(e)",
"def load_library(self,libname):\n paths = self.getpaths(libname)\n\n for path in paths:\n if os.path.exists(path):\n return self.load(path)\n\n raise ImportError(\"%s not found.\" % libname)",
"def load_library(self,libname):\n paths = self.getpaths(libname)\n\n for path in paths:\n if os.path.exists(path):\n return self.load(path)\n\n raise ImportError(\"%s not found.\" % libname)",
"def __init__(self, libpath):\n self._lib = CDLL(libpath)\n self._functions = {}",
"def load_function(library, function_name,\n module_type=MODULE_TYPE_EXECUTE_SIMPLE):\n function = getattr(library, function_name, None)\n if not function:\n function = getattr(library, function_name + \"_\", None)\n\n if function:\n if module_type == MODULE_TYPE_EXECUTE_SIMPLE:\n function.argtypes = [ctypes.c_voidp]\n function.restype = ctypes.c_int\n elif module_type == MODULE_TYPE_EXECUTE_CONFIG:\n function.argtypes = [ctypes.c_voidp, ctypes.c_voidp]\n function.restype = ctypes.c_int\n elif module_type == MODULE_TYPE_SETUP:\n function.argtypes = [ctypes.c_voidp]\n function.restype = ctypes.c_voidp\n elif module_type == MODULE_TYPE_CLEANUP:\n function.argtypes = [ctypes.c_voidp]\n function.restype = ctypes.c_int\n else:\n raise ValueError(\"Unknown module type passed to load_interface\")\n return function",
"def dlopen(ffi, *names):\r\n for name in names:",
"def load_library(name):\n with _stderr_capture() as err:\n gSystem = gbl.gSystem\n if name[:3] != 'lib':\n if not gSystem.FindDynamicLibrary(gbl.CppyyLegacy.TString(name), True) and\\\n gSystem.FindDynamicLibrary(gbl.CppyyLegacy.TString('lib'+name), True):\n name = 'lib'+name\n sc = gSystem.Load(name)\n if sc == -1:\n raise RuntimeError('Unable to load library \"%s\"%s' % (name, err.err))\n return True",
"def cwipc_realsense2_dll_load(libname : Optional[str]=None):\n global _cwipc_realsense2_dll_reference\n if _cwipc_realsense2_dll_reference: return _cwipc_realsense2_dll_reference\n \n with _cwipc_dll_search_path_collection(None) as loader:\n if libname == None:\n libname = 'cwipc_realsense2'\n if not os.path.isabs(libname):\n libname = loader.find_library(libname)\n if not libname:\n raise RuntimeError('Dynamic library realsense2 not found')\n assert libname\n _cwipc_realsense2_dll_reference = ctypes.CDLL(libname)\n if not _cwipc_realsense2_dll_reference:\n raise RuntimeError(f'Dynamic library {libname} cannot be loaded')\n \n _cwipc_realsense2_dll_reference.cwipc_realsense2.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_ulong]\n _cwipc_realsense2_dll_reference.cwipc_realsense2.restype = cwipc_tiledsource_p\n if hasattr(_cwipc_realsense2_dll_reference, 'cwipc_rs2offline'):\n _cwipc_realsense2_dll_reference.cwipc_rs2offline.argtypes = [cwipc_offline_settings, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_ulong]\n _cwipc_realsense2_dll_reference.cwipc_rs2offline.restype = cwipc_offline_p\n _cwipc_realsense2_dll_reference.cwipc_offline_free.argtypes = [cwipc_offline_p]\n _cwipc_realsense2_dll_reference.cwipc_offline_free.restype = None\n _cwipc_realsense2_dll_reference.cwipc_offline_get_source.argtypes = [cwipc_offline_p]\n _cwipc_realsense2_dll_reference.cwipc_offline_get_source.restype = cwipc_tiledsource_p\n _cwipc_realsense2_dll_reference.cwipc_offline_feed.argtypes = [cwipc_offline_p, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]\n _cwipc_realsense2_dll_reference.cwipc_offline_feed.restype = ctypes.c_bool\n\n return _cwipc_realsense2_dll_reference",
"def _import():\n global _lib\n if _lib is None:\n try:\n _lib = ctypes.CDLL(\"libpapi.so\")\n except OSError:\n raise RuntimeError(\"can't import libpapi.so; please install it\")\n\n for name, argtypes, restype in _FUNCTIONS:\n fn = getattr(_lib, name)\n fn.argtypes = argtypes\n fn.restype = restype\n globals()[name] = fn\n \n return _lib",
"def _on_library_new(self, evt=None):\n \n # raise save dialog\n wildcard = \"Papyrus library format|*.papyrus\"\n dlg = wx.FileDialog(self, \"New Papyrus Library\", \"\", \"library.papyrus\", wildcard=wildcard, style=wx.FD_SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n dlg.Destroy()\n else:\n dlg.Destroy()\n return\n \n # init library\n try:\n core.Library(path, new=True)\n except:\n wx.Bell()\n dlg = mwx.MessageDlg(self, -1, \"Cannot create the library.\", \"Please check access permissions.\", \"Error\")\n dlg.ShowModal()\n dlg.Destroy()\n return\n \n # open library\n self.OpenDocuments([path])",
"def initialize_libca():\n if 'EPICS_CA_MAX_ARRAY_BYTES' not in os.environ:\n os.environ['EPICS_CA_MAX_ARRAY_BYTES'] = \"%i\" % 2**24\n\n dllname = find_libca()\n load_dll = ctypes.cdll.LoadLibrary\n global libca, initial_context\n if os.name == 'nt':\n load_dll = ctypes.windll.LoadLibrary\n try:\n libca = load_dll(dllname)\n except:\n raise ChannelAccessException('loading Epics CA DLL failed')\n\n ca_context = {False:0, True:1}[PREEMPTIVE_CALLBACK]\n ret = libca.ca_context_create(ca_context)\n if ret != dbr.ECA_NORMAL:\n raise ChannelAccessException('cannot create Epics CA Context')\n\n # set argtypes and non-default return types\n # for several libca functions here\n libca.ca_pend_event.argtypes = [ctypes.c_double]\n libca.ca_pend_io.argtypes = [ctypes.c_double]\n libca.ca_client_status.argtypes = [ctypes.c_void_p, ctypes.c_long]\n libca.ca_sg_block.argtypes = [ctypes.c_ulong, ctypes.c_double]\n\n libca.ca_current_context.restype = ctypes.c_void_p\n libca.ca_version.restype = ctypes.c_char_p\n libca.ca_host_name.restype = ctypes.c_char_p\n libca.ca_name.restype = ctypes.c_char_p\n libca.ca_message.restype = ctypes.c_char_p\n\n # save value offests used for unpacking\n # TIME and CTRL data as an array in dbr module\n dbr.value_offset = (39*ctypes.c_short).in_dll(libca,'dbr_value_offset')\n initial_context = current_context()\n\n if AUTO_CLEANUP:\n atexit.register(finalize_libca)\n return libca",
"def recognize_lib(self, a, lib):\n logging.debug(\"in recognize lib\")\n self.produce(KEYWORD, lib)\n self.begin('lib')",
"def load_library(library_location):\n if not os.path.exists(library_location):\n raise FileNotFoundError('Invalid path: %s' % library_location)\n with dlopen_guard():\n ctypes.cdll.LoadLibrary(library_location)",
"def load_lib():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n lib = ctypes.CDLL(os.path.join(curr_path, \"../../lib/libtvm_ext.so\"))\n return lib",
"def load_dll(dll_path):\r\n if KDC101.__lib is None:\r\n KDC101.__lib = load_dll(dll_path)",
"def on_lz_registered(self, func):\n self._set_event_handler(\"lz\")\n self._events.on_lz_registered(func)",
"def hook(callback):\n hooks.append(callback)",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.phe",
"def load_device():",
"def load_shared_library(lib, _path='.', ver='*'):\n # find from the system path\n path = find_library(lib)\n if (path == None): # if fail, search in the custom directory\n s = platform.system()\n if (s == 'Darwin'): suf = ver+'.dylib'\n elif (s == 'Linux'): suf = '.so'+ver\n candidates = glob.glob(_path+'/lib'+lib+suf);\n if (len(candidates) == 1): path = candidates[0]\n else: return None\n cdll.LoadLibrary(path)\n return CDLL(path)",
"def process_library(self):\n self.process_namespace(self.newlibrary.wrap_namespace)"
]
| [
"0.5989056",
"0.5868301",
"0.57031065",
"0.5582317",
"0.5524115",
"0.5515501",
"0.55016947",
"0.5461312",
"0.5367845",
"0.5367845",
"0.53560525",
"0.53560525",
"0.5341005",
"0.53404593",
"0.53178525",
"0.5310855",
"0.530366",
"0.52396536",
"0.5220809",
"0.52202785",
"0.51994044",
"0.5159594",
"0.5118356",
"0.5073298",
"0.50624627",
"0.49965385",
"0.49841148",
"0.49619278",
"0.4958721",
"0.495696"
]
| 0.7665019 | 0 |
Search for the provided account address in KV 'History of Accounts' table. | def kv_seek_account_history(account_address: str, block_number: int, target: str = DEFAULT_TARGET):
account_history_key = kv_metadata.encode_account_history_key(account_address, block_number)
print('REQ1 account_address:', account_address, '(key: ' + str(account_history_key.hex()) + ')')
print('RSP1 account history: [')
walker = lambda key, value: print('key:', key.hex(), 'value:', value.hex())
kv_utils.kv_walk(target, tables.ACCOUNTS_HISTORY_LABEL, account_history_key, walker)
print(']')
print('REQ2 account_address:', account_address, '(key: ' + str(account_history_key.hex()) + ')')
print('RSP2 storage history: [')
walker = lambda key, value: print('key:', key.hex(), 'value:', value.hex())
kv_utils.kv_walk(target, tables.STORAGE_HISTORY_LABEL, account_history_key, walker)
print(']') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_by_address(self, address):\n assert len(address) == 20\n accounts = [account for account in self.accounts if account.address == address]\n if len(accounts) == 0:\n raise KeyError('account with address {} not found'.format(encode_hex(address)))\n elif len(accounts) > 1:\n log.warning('multiple accounts with same address found', address=encode_hex(address))\n return accounts[0]",
"def __get_account(self, address):\n\t\tfor acct in self.wallet:\n\t\t\tif acct[\"address\"] == address:\n\t\t\t\treturn acct\n\t\traise ValueError(\"The given address does not exist in the bunkr-wallet\")",
"def search_by_account_number(self, account_num):\n for entry in self.entries:\n if entry['Account Number'] == int(account_num):\n self.pp_entry(entry)",
"def fetch_history(self, address, cb, from_height=0):\r\n address_version, address_hash = \\\r\n bitcoin.bc_address_to_hash_160(address)\r\n # prepare parameters\r\n data = struct.pack('B', address_version) # address version\r\n data += address_hash[::-1] # address\r\n data += struct.pack('<I', from_height) # from_height\r\n\r\n # run command\r\n self.send_command('address.fetch_history', data, cb)",
"def get_history_from_address(address):\n \n if not exists_address_transfer_observation_from(address):\n return make_response(jsonify(build_error('No content. transactions from the address are not observed')), 204)\n \n take = request.args.get('take')\n if take is None:\n take = 0\n else:\n take = int(take)\n \n afterhash = request.args.get('afterHash')\n if afterhash is None:\n afterhash = \"\" \n \n update_index() \n items = get_transactions_from(address, take, afterhash)\n \n if 'error' in items:\n return make_response(jsonify(build_error(items['error'])), items['status'])\n\n return jsonify(items)",
"def find_by_account(cls, accounts):\n\n for name in cls.account_list:\n if name.acc_name == accounts:\n return name",
"def get_addresses_by_account(account):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"getaddressesbyaccount\", account])\n addresses = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return addresses",
"def lookup_by_account_name(account_name):\n try:\n account = session.query(SnapshotBalance).filter(\n func.lower(SnapshotBalance.account_name) == account_name.lower()).first()\n except IntegrityError as pie:\n msg = str(pie)\n raise InvalidUsage(msg, status_code=400)\n return account",
"def get_history_to_address(address):\n \n if not exists_address_transfer_observation_to(address):\n return make_response(jsonify(build_error('No content: transactions to the address are not observed')), 204)\n \n take = request.args.get('take')\n if take is None:\n take = 0\n else:\n take = int(take)\n \n afterhash = request.args.get('afterHash'.lower())\n if afterhash is None:\n afterhash = ''\n \n update_index() \n items = get_transactions_to(address, take, afterhash)\n\n response = items if take == 0 else items[0:take]\n\n return jsonify(response)",
"def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)",
"def get_account_address(account_name):\n command = 'getaddressesbyaccount {0}'.format(account_name)\n result = do_command(command)\n if result == -1:\n log('Fatal error: get addresses by account faild!')\n return -1\n\n json_obj = json.loads(result)\n address_count = len(json_obj)\n if address_count == 0:\n log('no account address: {0}, to create new one!'.format(account_name))\n command = 'getaccountaddress {0}'.format(account_name)\n result = do_command(command)\n if result == -1:\n log('Fatal error, create new address faild: {0}'.format(account_name))\n return -1\n else:\n return result\n else:\n return json_obj[0]",
"def account(request):\n def searchAccounts(prop, domain, added, response):\n prefix = request.GET.get('q').lower()\n limit = _clean_int(request.GET.get('limit'), 10, 10, 100)\n\n accounts_query = models.Account.query(\n prop >= prefix, prop < prefix + u\"\\ufffd\").order(prop)\n for account in accounts_query:\n if account.blocked:\n continue\n if account.key in added:\n continue\n if domain and not account.email.endswith(domain):\n continue\n if len(added) >= limit:\n break\n added.add(account.key)\n response += '%s (%s)\\n' % (account.email, account.nickname)\n return added, response\n\n added = set()\n response = ''\n domain = os.environ['AUTH_DOMAIN']\n if domain != 'gmail.com':\n # 'gmail.com' is the value AUTH_DOMAIN is set to if the app is running\n # on appspot.com and shouldn't prioritize the custom domain.\n added, response = searchAccounts(\n models.Account.lower_email, domain, added, response)\n added, response = searchAccounts(\n models.Account.lower_nickname, domain, added, response)\n\n added, response = searchAccounts(\n models.Account.lower_nickname, \"\", added, response)\n added, response = searchAccounts(\n models.Account.lower_email, \"\", added, response)\n return HttpTextResponse(response)",
"def _search_account_history(cyclos, account, direction, begin_date, end_date, payment_types=[]):\n current_page = 0\n account_history = []\n while True:\n search_history_data = {\n 'account': account,\n 'direction': direction,\n 'period':\n {\n 'begin': begin_date,\n 'end': end_date,\n },\n 'orderBy': 'DATE_ASC',\n 'pageSize': 1000, # maximum pageSize: 1000\n 'currentPage': current_page,\n }\n search_history_res = cyclos.post(method='account/searchAccountHistory', data=search_history_data)\n account_history.extend(search_history_res['result']['pageItems'])\n page_count = search_history_res['result']['pageCount']\n if page_count == 0 or current_page + 1 == page_count:\n break\n else:\n current_page += 1\n filtered_history = []\n for entry in account_history:\n # On filtre d'abord par type de paiement et ensuite on regarde\n # si le paiement a fait l'objet d'une opposition de paiement\n # (dans cet ordre car pour voir s'il y a une oppostion de\n # paiement, il faut faire une requête au serveur).\n # On récupère les données de la transaction et on vérifie si la\n # donnée 'chargedBackBy' est présente dans le transfert associé.\n #\n # Note : Les transactions importées lors de la migration de\n # Cyclos 3 à Cyclos 4 sont de type ImportedTransactionData et\n # n'ont pas de transfert associé. Elles ne peuvent pas être\n # annulées. Les transactions enregistrées depuis (les\n # transactions \"normales\" en quelque sorte), sont de type\n # PaymentData.\n if entry['type']['id'] in payment_types:\n get_data_res = cyclos.get(method='transaction/getData/{}'.format(entry['transactionId']))\n transaction_data = get_data_res['result']\n if (transaction_data['class'] ==\n 'org.cyclos.model.banking.transactions.ImportedTransactionData'\n or (transaction_data['class'] ==\n 'org.cyclos.model.banking.transactions.PaymentData'\n and'chargedBackBy' not in transaction_data['transfer'].keys())):\n filtered_history.append(entry)\n return filtered_history",
"def on_account_addition(self, address: ChecksumEthAddress) -> None:\n ...",
"def find_by_account_name(cls, account_name):\n for account in cls.credentials_list:\n if account.account_name == account_name:\n return account",
"def find_address():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(f'{business_object[\"name\"]}\\'s address is:'\n f'{business_object[\"address\"]}, {business_object[\"city\"]} '\n f'{business_object[\"state\"]}')",
"def getaddressesbyaccount(self, account):\n return self.proxy.getaddressesbyaccount(account)",
"def add_history_from_address(address):\n \n result = add_transaction_observation_from_address(address)\n \n # if successfully stored in observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"",
"def account_history(self, account=None, type='all', range=\"all\"):\n \n if not (utils.check(type) and utils.check(range)):\n return {}\n \n # Imply account\n if account == None:\n account = self.params['account']\n \n # Assemble URL\n url = self.endpoints['base'] +\\\n 'accounts/' +\\\n str(account) +\\\n '/history.json'\n # Add parameters\n data = {\n 'range':range,\n 'transactions':type\n }\n \n # Create HTTP Request objects\n session = requests.Session()\n auth = self.create_auth()\n req = requests.Request('GET',url,params=data,auth=auth).prepare()\n \n \n results = {'response':session.send(req).json()}\n results['request'] = utils.pretty_print_POST(req)\n \n return results['response']['response']['transactions']['transaction']",
"def account(request):\n def searchAccounts(prop, domain, added, response):\n prefix = request.GET.get('q').lower()\n required_reviewer = prefix.startswith(models.REQUIRED_REVIEWER_PREFIX)\n prefix = prefix.lstrip(models.REQUIRED_REVIEWER_PREFIX)\n limit = _clean_int(request.GET.get('limit'), 10, 10, 100)\n\n # This uses eventual consistency and cannot be made strongly consistent.\n accounts_query = models.Account.query(\n prop >= prefix, prop < prefix + u\"\\ufffd\").order(prop)\n for account in accounts_query:\n if account.blocked:\n continue\n if account.key in added:\n continue\n if domain and not account.email.endswith(domain):\n continue\n if len(added) >= limit:\n break\n added.add(account.key)\n if required_reviewer:\n response += models.REQUIRED_REVIEWER_PREFIX\n response += '%s (%s)\\n' % (account.email, account.nickname)\n return added, response\n\n added = set()\n response = ''\n domain = os.environ['AUTH_DOMAIN']\n if domain != 'gmail.com':\n # 'gmail.com' is the value AUTH_DOMAIN is set to if the app is running\n # on appspot.com and shouldn't prioritize the custom domain.\n added, response = searchAccounts(\n models.Account.lower_email, domain, added, response)\n added, response = searchAccounts(\n models.Account.lower_nickname, domain, added, response)\n\n added, response = searchAccounts(\n models.Account.lower_nickname, \"\", added, response)\n added, response = searchAccounts(\n models.Account.lower_email, \"\", added, response)\n return HttpTextResponse(response)",
"def add_history_to_address(address):\n \n result = add_transaction_observation_to_address(address)\n \n # if successfully stored in observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"",
"def set_account_id(account_id):\n conn = get_connect()\n conn.execute(\"UPDATE account SET isSearched = 1 WHERE accountId = \" + str(account_id))\n conn.commit()\n conn.close()\n print(\"accountId \" + str(account_id) + \" has been searched\")\n return",
"def query_account(self, account: str, fields: str = None):\n if fields and (type(fields) != str):\n raise TypeError('fields: %s' % repr(fields))\n args = {'account': account}\n if fields:\n args['fields'] = fields\n ret = self._call_txtrader_api('query_account', args)\n return ret",
"def bank_lookup_account(stub, request):\n # print(\"In method bank_lookup_account:\")\n result = stub.LookUpAccount(request) # <-- remember to check whether port is occupied!\n # from line 26 to 28 seem never gonna be reached!\n if result is None:\n return \"Unable to find record\"\n # raise AccountNotExistError\n # print(result)\n return result",
"def accounts():",
"def find(self, identifier):\n try:\n uuid = UUID(identifier)\n except ValueError:\n pass\n else:\n return self.get_by_id(uuid.hex)\n\n try:\n index = int(identifier, 10)\n except ValueError:\n pass\n else:\n if index <= 0:\n raise ValueError('Index must be 1 or greater')\n try:\n return self.accounts[index - 1]\n except IndexError as e:\n raise KeyError(e.message)\n\n if identifier[:2] == '0x':\n identifier = identifier[2:]\n try:\n address = decode_hex(identifier)\n except TypeError:\n success = False\n else:\n if len(address) != 20:\n success = False\n else:\n return self[address]\n\n assert not success\n raise ValueError('Could not interpret account identifier')",
"def get_account_by_name(self, account_name):\n accounts = self.service_old.management().accounts().list().execute()\n\n account = None\n if accounts.get('items'):\n account = next(acnt for acnt in accounts.get('items') if acnt[\"name\"] == account_name)\n\n if account is None:\n log_msg = \"The account named \" + account_name + \" does not exist!\"\n print(log_msg)\n\n return account",
"def address_transactions(self, address):\n res = r.get(self.url + self.address_tx + str(address))\n return self.execute(res)",
"def getaccount(self, vergeaddress):\n return self.proxy.getaccount(vergeaddress)",
"def search_name(self, accountId, first_name, last_name):\n p = {\"first_name\": first_name, \"last_name\": last_name}\n if accountId:\n p['accountId'] = accountId\n return self.get_json('/verification/search', params=p)"
]
| [
"0.6519464",
"0.6338236",
"0.6017004",
"0.5988657",
"0.5958928",
"0.59446615",
"0.58395207",
"0.5836881",
"0.5755204",
"0.5747608",
"0.56816727",
"0.56154644",
"0.56109655",
"0.55488646",
"0.55431986",
"0.55253816",
"0.55104375",
"0.550851",
"0.5498978",
"0.5483937",
"0.54425627",
"0.531818",
"0.5303172",
"0.529316",
"0.529072",
"0.52545816",
"0.522628",
"0.5222574",
"0.52134025",
"0.51998204"
]
| 0.63830864 | 1 |
Checks if "console.getData()" JS command returns some error. If so, the test will fail. | def _checkJSErrors(self):
js_error = False
console_data = []
try:
console_data = self.driver.execute_script("return console.getData()")
except:
pass
if console_data:
for item in console_data:
if item["type"] == "error":
js_error = item
break
# fail test if there is any JS error in the console
if js_error:
self.fail("An JS error has occured on the page: " + json.dumps(js_error)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_data(clear, data):\r\n cmd = ShdlcCmdGetErrorState(clear=clear)\r\n assert type(cmd.data) is bytes\r\n assert cmd.data == data",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_command(self):\n out = io.StringIO()\n management.call_command('import_data', stdout=out)\n self.assertIn(\"Successfully imported\", out.getvalue())",
"def error_check(command):\r\n\r\n # TODO\r",
"def test_port_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"Port check failed. See log for details\"\n }\n self.test_port.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err",
"def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)",
"def test_snmptdisk_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"SNMP Request failed. See log for details\"\n }\n self.test_snmpdisk.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n self.assertFalse(out.returncode)",
"def test_read_commandline_bad_cmd(dataframe):\n temp_dir = tempfile.gettempdir()\n\n # create a temporary .csv file\n dataframe.to_csv(f\"{temp_dir}/dataframe.csv\")\n\n # Test 1\n with pytest.raises(TypeError):\n janitor.io.read_commandline(6)\n\n # Test 2\n with pytest.raises(CalledProcessError):\n janitor.io.read_commandline(\"bad command\")\n\n # Test 3\n # windows does not support \"cat\" in commandline\n # \"type\" command must be used and it returns a different error\n cmd = \"cat\"\n\n ExpectedError = pd.errors.EmptyDataError\n if sys.platform in [\"win32\"]:\n cmd = \"type\"\n ExpectedError = CalledProcessError\n\n with pytest.raises(ExpectedError):\n janitor.io.read_commandline(cmd)\n\n # clean up after the tests\n os.unlink(f\"{temp_dir}/dataframe.csv\")",
"def test_ping_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"Ping failed.\"\n }\n self.test_ping.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_get_case_command_fail(loqusdbapi, mocker):\n # GIVEN a loqusdb api and a case id\n case_id = 'a_case'\n # WHEN an error occurs during fetching a case with the adapter\n mocker.patch.object(subprocess, 'check_output')\n subprocess.check_output.side_effect = subprocess.CalledProcessError(1, 'error')\n\n # THEN assert that the error is raised\n with pytest.raises(subprocess.CalledProcessError):\n loqusdbapi.get_case(case_id)",
"def test_snmpprocss_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"SNMP Request failed. See log for details\"\n }\n self.test_snmpprocess.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def check_errors(self) -> None:",
"def test_first_step(self):\n self.assertIsNone(cd.shared.fetch('df'))\n step = self.run_step('S01-first.py')\n df = cd.shared.df\n self.assertFalse(df.isnull().values.any())\n\n error_echo = step.echo_error()\n self.assertEqual(error_echo, '')",
"def test_snmptmemory_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"SNMP Request failed. See log for details\"\n }\n self.test_snmpmemory.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_debug_output(self):\n assert output(self.msg) is not None",
"def get_data(command):\n command = subprocess.run(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding='utf-8',\n )\n\n # if command not succesfully executed, stop script\n if command.returncode != 0:\n print(f'ERROR:~ {command.stderr}')\n return False\n\n if not command.stdout:\n print(f'ERROR:~ Command output [{command}] is empty')\n return command.stdout",
"def test_wrong_input():\n dwd = DwdWeatherWarningsAPI(None)\n assert not dwd.data_valid\n assert dwd.warncell_id is None\n assert dwd.warncell_name is None\n assert dwd.last_update is None\n assert dwd.current_warning_level is None\n assert dwd.expected_warning_level is None\n assert dwd.current_warnings is None\n assert dwd.expected_warnings is None",
"def _is_non_real_command_found(self, script_data):\n is_valid = True\n depends_on_commands = script_data.get('depends_on')\n if depends_on_commands:\n for command in depends_on_commands:\n if command != 'test-module':\n if command.endswith('dev') or command.endswith('copy'):\n error_message, error_code = Errors.invalid_command_name_in_script(script_data.get('name'),\n command)\n if self.handle_error(error_message, error_code, file_path=\"id_set.json\"):\n return not is_valid\n return is_valid",
"def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(ERROR_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def test_main_succeeds(runner: CliRunner, mock_requests_get: MockFixture) -> None:\n result = runner.invoke(console.main)\n assert result.exit_code == 0",
"def test_write_to_console_fail(self, _step: PropertyMock):\n _step.return_value = None\n step = exposed.ExposedStep()\n with self.assertRaises(ValueError):\n step.write_to_console('hello')",
"def test_command(self):\n output, _error = self.executor.command(['echo', 'hello']).batch()\n self.assertEqual(output, 'hello\\n')",
"def test_read_unexpected_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.read(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out",
"def test_ds18b20_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"DS18B20 sensor failed. See log for details\"\n }\n self.test_ds18b20.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_check_if_not_error(self):\n actual_result = SshpassErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def _verifyCommand(self):\n for i in range(3):\n rc = self.subdevice.command_test() # Verify command is correct\n if rc is None:\n break",
"def test_command(self):\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', 'blah', 'blah', 'blah',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n # Send bad url to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.URL_BAD):\r\n call_command('git_export', 'foo/bar/baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n # Send bad course_id to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.BAD_COURSE):\r\n call_command('git_export', 'foo/bar:baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)",
"def get_error(session, command):\n \n has_err=False\n resp = SCPI_sock_query(session,\"SYST:ERR?\")\n \n if int(resp[:2]) != 0:\n print( \"Your command: \" + command + \" has errors:\" )\n print( resp )\n has_err = True\n while int(resp[:2]) != 0:\n resp=SCPI_sock_query(session,\"SYST:ERR?\")\n if int(resp[:2]) != 0:\n print( resp )\n\n return has_err"
]
| [
"0.65767086",
"0.63340867",
"0.6225417",
"0.6032751",
"0.6028203",
"0.59820247",
"0.58227086",
"0.57617176",
"0.57090616",
"0.5661298",
"0.56505835",
"0.5646934",
"0.5640983",
"0.5589693",
"0.5580483",
"0.55710274",
"0.5570266",
"0.5546296",
"0.55458724",
"0.5529029",
"0.55225265",
"0.55004615",
"0.5499608",
"0.5496295",
"0.5490791",
"0.54815006",
"0.5478616",
"0.5476213",
"0.5476081",
"0.5469174"
]
| 0.63554543 | 1 |
Some special settings for IE to make the browser more stable. | def _finetuneIE(self):
# start IE in private mode to prevent storing cookies
self._browser_capabilities["ie.forceCreateProcessApi"] = 1
self._browser_capabilities["ie.browserCommandLineSwitches"] = "-private"
# seems not reliable. More testing needed.
#self._browser_capabilities["ie.usePerProcessProxy"] = True
# Too slow. Private mode is probably better solution for cache and cookie cleaning
#self._browser_capabilities["ie.ensureCleanSession"] = True
# IE seems to be more stable with this option
self._browser_capabilities["ie.setProxyByServer"] = True
# IE8 hack to prevent "...click on the element was not scrolled into the viewport" error
if self._browser_capabilities["version"] == "8.0":
self._browser_capabilities["elementScrollBehavior"] = 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def internet_explorer():\n driver = Driver()\n driver.browser = 'ie'\n return IE(driver)",
"def test_msieParser(self):\n agent = UserAgent.fromHeaderValue(\n 'Mozilla/4.0 (compatible; MSIE 4.5; Windows 98; Win 9x 4.8410008)')\n self.assertEqual(agent.browser, browsers.INTERNET_EXPLORER)",
"def __init__(self, **kwargs):\n super(DesktopInternetExplorerWebDriver, self).__init__(\n default_capabilities=False, # ensure that HackedWebDriver doesn't mess with the capabilities\n **kwargs\n )",
"def test_parseExplorer65(self):\n agent = UserAgent.parse_MSIE(\n 'Mozilla/5.0 (compatible; MSIE 6.5; Windows 98; Win 9x 4.7654712)')\n self.assertEqual(agent.browser, browsers.INTERNET_EXPLORER)\n self.assertEqual(agent.version, (6, 5))",
"def test_parseExplorer45(self):\n agent = UserAgent.parse_MSIE(\n 'Mozilla/4.0 (compatible; MSIE 4.5; Windows 98; Win 9x 4.8410008)')\n self.assertEqual(agent.browser, browsers.INTERNET_EXPLORER)\n self.assertEqual(agent.version, (4, 5))",
"def test_parseExplorer55(self):\n agent = UserAgent.parse_MSIE(\n 'Mozilla/5.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.1704896)')\n self.assertEqual(agent.browser, browsers.INTERNET_EXPLORER)\n self.assertEqual(agent.version, (5, 5))",
"def add_header(response):\r\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\r\n response.headers['Cache-Control'] = 'public, max-age=0'\r\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response",
"def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response"
]
| [
"0.6183956",
"0.58615327",
"0.5657646",
"0.5383154",
"0.5360376",
"0.53398156",
"0.5253256",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785",
"0.51662785"
]
| 0.6713149 | 0 |
Creates indices for a mini_batch, given an iteration state and number of data points in a batch. | def BatchCreator(self, j, n_batch):
j_start = (j-1)*n_batch + 1
j_end = j*n_batch + 1
ind = np.arange(start= j_start, stop=j_end, step=1)
return ind | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_batch_indices(batch_size: int, beam_size: int) ->torch.LongTensor:\n batch_pos = torch.arange(batch_size)\n batch_pos = batch_pos.view(-1, 1).expand(batch_size, beam_size)\n return batch_pos",
"def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]",
"def compute_batch_indices(batch_size, beam_size):\n batch_range = tf.range(batch_size * beam_size) // beam_size\n batch_index_matrix = tf.reshape(batch_range, [batch_size, beam_size])\n return batch_index_matrix",
"def make_batch_indices(instance_indices):\n batch_indices = list(itertools.chain.from_iterable([\n [(row_number, index) for index in indices]\n for row_number, indices in enumerate(instance_indices)\n ]))\n # Indices must have shape (?, 2). Therefore if we encounter an empty\n # batch, we return an empty ndarray with shape (0, 2).\n return batch_indices if batch_indices else np.empty([0, 2], dtype=np.int64)",
"def __init__(self, list_of_iteration_args, index_list, batch_size,\n one_hot_size=None, random_state=None):\n self.list_of_iteration_args = list_of_iteration_args\n self.batch_size = batch_size\n\n self.random_state = random_state\n if random_state is None:\n raise ValueError(\"Must pass random state for random selection\")\n\n self.one_hot_size = one_hot_size\n if one_hot_size is not None:\n assert len(one_hot_size) == len(list_of_iteration_args)\n\n iteration_args_lengths = []\n iteration_args_dims = []\n for n, ts in enumerate(list_of_iteration_args):\n c = [(li, np.array(tis).shape) for li, tis in enumerate(ts)]\n if len(iteration_args_lengths) > 0:\n assert c[-1][0] == iteration_args_lengths[-1]\n assert c[-1][1] == iteration_args_dims[-1]\n iteration_args_lengths.append(c[-1][0] + 1)\n iteration_args_dims.append(c[-1][1])\n self.iteration_args_lengths_ = iteration_args_lengths\n self.iteration_args_dims_ = iteration_args_dims\n\n # set up the matrices to slice one_hot indexes out of\n # todo: setup slice functions? or just keep handling in next_batch\n if one_hot_size is None:\n self._oh_slicers = [None] * len(list_of_iteration_args)\n else:\n self._oh_slicers = []\n for ooh in one_hot_size:\n if ooh is None:\n self._oh_slicers.append(None)\n else:\n self._oh_slicers.append(np.eye(ooh, dtype=np.float32))\n\n self.index_list = index_list\n if len(self.index_list) != self.iteration_args_lengths_[0]:\n raise ValueError(\"index_list must have same length as iterations args, got {} and {}\".format(len(self.index_list), self.iteration_args_lengths_[0]))\n self.index_set = sorted(list(set(index_list)))\n self.index_groups = {k: np.array([n for n, i in enumerate(index_list) if i == k]) for k in self.index_set}\n\n shuf_set = copy.copy(self.index_set)\n self.random_state.shuffle(shuf_set)\n self.all_index_ = np.array([i for i in shuf_set for ii in self.index_groups[i]])\n self.all_indices_ = np.array([ii for i in shuf_set for ii in self.index_groups[i]])\n self.all_indices_ = self.all_indices_[:len(self.all_indices_) - len(self.all_indices_) % self.batch_size]\n\n self.indices_offset_ = 0\n self.indices_ = self.all_indices_[self.indices_offset_:self.indices_offset_ + self.batch_size]\n self.index_ = self.all_index_[self.indices_offset_:self.indices_offset_ + self.batch_size]\n self.indices_offset_ += self.batch_size",
"def get_minibatches_idx(n, minibatch_size, shuffle=False):\n\n idx_list = np.arange(n, dtype=\"int32\")\n\n if shuffle:\n random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start:\n minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if (minibatch_start != n):\n # Make a minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)",
"def get_minibatches_idx(n, minibatch_size, shuffle=False):\n\n idx_list = numpy.arange(n, dtype=\"int32\")\n\n if shuffle:\n numpy.random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start:\n minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if (minibatch_start != n):\n # Make a minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)",
"def getMiniBatch(self, batch_size):\r\n indices = self.get_indices(batch_size)\r\n states, actions, rewards, next_states, is_done_vec = [], [], [], [], []\r\n for ER_i in self.ER_array:\r\n agent_experiences = [ER_i.buffer[index] for index in indices] # extract experiences\r\n # ---- organize the experiences so that the experiences of each agent are aligned ----\r\n states_i, actions_i, rewards_i, next_states_i, is_done_vec_i = [], [], [], [], []\r\n for experience in agent_experiences:\r\n states_i.append(experience.state)\r\n actions_i.append(experience.action)\r\n rewards_i.append(experience.reward)\r\n next_states_i.append(experience.next_state)\r\n is_done_vec_i.append(experience.done)\r\n states.append(states_i)\r\n actions.append(actions_i)\r\n rewards.append(rewards_i)\r\n next_states.append(next_states_i)\r\n is_done_vec.append(is_done_vec_i)\r\n return states, actions, rewards, next_states, is_done_vec",
"def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check",
"def get_minibatches_idx(n, minibatch_size, shuffle=False):\n\n idx_list = range(n)\n\n if shuffle:\n random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start:\n minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if (minibatch_start != n):\n # Make a minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)",
"def get_tiled_indices(batch_size, row_size):\n tiled_indices = tf.range(batch_size)\n tiled_indices = tf.tile(tf.expand_dims(tiled_indices, axis=1), (1, row_size))\n tiled_indices = tf.reshape(tiled_indices, (-1, 1))\n return tiled_indices",
"def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index",
"def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for channel_id in range(channel_num):\n for row_id in range(row_num):\n for col_id in range(col_num):\n our_indices[batch_id, channel_id, row_id, col_id] = col_num * 2 * 2 * row_id + 2 * col_id\n return torch.from_numpy(our_indices)",
"def __init__(self, list_of_iteration_args, batch_size,\n one_hot_size=None, random_state=None):\n self.list_of_iteration_args = list_of_iteration_args\n self.batch_size = batch_size\n\n self.random_state = random_state\n if random_state is None:\n raise ValueError(\"Must pass random state for random selection\")\n\n self.one_hot_size = one_hot_size\n if one_hot_size is not None:\n assert len(one_hot_size) == len(list_of_iteration_args)\n\n iteration_args_lengths = []\n iteration_args_dims = []\n for n, ts in enumerate(list_of_iteration_args):\n c = [(li, np.array(tis).shape) for li, tis in enumerate(ts)]\n if len(iteration_args_lengths) > 0:\n if len(c[-1][1]) == 0:\n raise ValueError(\"iteration_args arguments should be at least 2D arrays, detected 1D\")\n # +1 to handle len vs idx offset\n if c[-1][0] + 1 != iteration_args_lengths[-1]:\n raise ValueError(\"iteration_args arguments should have the same iteration length (dimension 0)\")\n #if c[-1][1] != iteration_args_dims[-1]:\n # from IPython import embed; embed(); raise ValueError()\n\n iteration_args_lengths.append(c[-1][0] + 1)\n iteration_args_dims.append(c[-1][1])\n self.iteration_args_lengths_ = iteration_args_lengths\n self.iteration_args_dims_ = iteration_args_dims\n\n # set up the matrices to slice one_hot indexes out of\n # todo: setup slice functions? or just keep handling in next_batch\n if one_hot_size is None:\n self._oh_slicers = [None] * len(list_of_iteration_args)\n else:\n self._oh_slicers = []\n for ooh in one_hot_size:\n if ooh is None:\n self._oh_slicers.append(None)\n else:\n self._oh_slicers.append(np.eye(ooh, dtype=np.float32))\n\n # set up the indices selected for the first batch\n self.indices_ = self.random_state.choice(self.iteration_args_lengths_[0],\n size=(batch_size,), replace=False)",
"def _build_iteration_indexes(data_len, num_iterations,\n verbose=False, random_generator=None,\n use_epochs=False):\n if use_epochs:\n iterations_per_epoch = arange(data_len)\n if random_generator:\n random_generator.shuffle(iterations_per_epoch)\n iterations = tile(iterations_per_epoch, num_iterations)\n else:\n iterations = arange(num_iterations) % data_len\n if random_generator:\n random_generator.shuffle(iterations)\n if verbose:\n return _wrap_index__in_verbose(iterations)\n else:\n return iterations",
"def _generate_indices(random_state, bootstrap, n_population, n_samples):\n # Draw sample indices\n if bootstrap:\n indices = random_state.randint(0, n_population, n_samples)\n else:\n indices = sample_without_replacement(n_population, n_samples,\n random_state=random_state)\n\n return indices",
"def _generate_sample_indexes(random_state, n_samples, n_samples_bootstrap):\n # Obtain the random state\n random_state = check_random_state(random_state)\n\n # Obtain the indexes for the samples taking\n # into account the total number of samples\n # and the number of samples to be taken\n sample_indexes = random_state.randint(0, n_samples, n_samples_bootstrap)\n\n # Return them\n return sample_indexes",
"def tensor2indices(batch_sents):\n size_batch = tf.shape(batch_sents)[0]\n len_batch = tf.shape(batch_sents)[1]\n batch_i = tf.range(size_batch)\n len_i = tf.range(len_batch)\n\n # [0,0,0,1,1,1,2,2,2,...]\n batch_i = tf.tile(batch_i[:, None], [1, len_batch])\n # [0,1,2,0,1,2,0,1,2,...]\n len_i = tf.tile(len_i[None, :], [size_batch, 1])\n\n indices = tf.stack([batch_i, len_i, batch_sents], -1)\n\n return indices",
"def _iterate_minibatches(self, batch_size, shuffle=True):\n num_input = self.x_train.shape[0]\n indices = np.arange(num_input)\n if shuffle:\n np.random.shuffle(indices)\n for start_idx in range(0, num_input - batch_size + 1, batch_size):\n minibatch = indices[start_idx:start_idx + batch_size]\n yield minibatch",
"def mini_batches(training_data: TrainingDataLoader, mini_batche_size: int):\n\n X = []\n Y = []\n\n for x, y in training_data:\n\n X.append(x)\n Y.append(y)\n\n if len(X) == mini_batche_size:\n yield (np.column_stack((X)), np.column_stack((Y)))\n X = []\n Y = []",
"def batch_start(self, batch_idx, batch_data):\n self.batch = batch_idx",
"def index_batch(self,batch):\n pass",
"def sample_batch_indexes(low, high, size):\n if high - low >= size:\n # We have enough data. Draw without replacement, that is each index is unique in the\n # batch. We cannot use `np.random.choice` here because it is horribly inefficient as\n # the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion.\n # `random.sample` does the same thing (drawing without replacement) and is way faster.\n try:\n r = range(low, high)\n except NameError:\n r = range(low, high)\n batch_idxs = random.sample(r, size)\n else:\n # Not enough data. Help ourselves with sampling from the range, but the same index\n # can occur multiple times. This is not good and should be avoided by picking a\n # large enough warm-up phase.\n warnings.warn('Not enough entries to sample without replacement. Consider increasing your warm-up phase to avoid oversampling!')\n batch_idxs = np.random.random_integers(low, high - 1, size=size)\n assert len(batch_idxs) == size\n return batch_idxs",
"def train_batch_idx(self) -> int:\n return self._train_batch_idx",
"def _im2row_index(input_shape,\n block_shape,\n slice_step=(1, 1),\n data_format='NHWC',\n padding='VALID',\n dtype=tf.int64,\n name=None):\n with tf.name_scope(name or 'im2row_index'):\n # 1) Process input arguments.\n batch_shape, s3, s2, s1 = prefer_static.split(\n prefer_static.cast(input_shape, tf.int32),\n num_or_size_splits=[-1, 1, 1, 1])\n fh, fw = _split_pair(block_shape)\n sh, sw = _split_pair(slice_step)\n data_format = _validate_data_format(data_format)\n padding = _validate_padding(padding)\n\n # 2) Assemble all block start positions as indexes into the flattened image.\n if data_format == 'NHWC':\n h, w, c = s3[0], s2[0], s1[0]\n # start_idx.shape = [fh, fw, c]\n start_idx = _cartesian_add([\n prefer_static.range(c * w * fh, delta=c * w, dtype=dtype),\n prefer_static.range(c * fw, delta=c, dtype=dtype),\n prefer_static.range(c, delta=1, dtype=dtype),\n ])\n elif data_format == 'NCHW':\n c, h, w = s3[0], s2[0], s1[0]\n # start_idx.shape = [c, fh, fw]\n start_idx = _cartesian_add([\n prefer_static.range(w * h * c, delta=w * h, dtype=dtype),\n prefer_static.range(w * fh, delta=w, dtype=dtype),\n prefer_static.range(fw, delta=1, dtype=dtype),\n ])\n else:\n assert False # Can't be here.\n\n # 3) Assemble all block offsets (into flattened image).\n if padding == 'VALID':\n eh = h - fh + 1 # extent height\n ew = w - fw + 1 # extent width\n # offset_idx.shape = [eh // sh, ew // sw]\n offset_idx = _cartesian_add([\n prefer_static.range(w * eh, delta=w * sh, dtype=dtype),\n prefer_static.range(ew, delta=sw, dtype=dtype),\n ])\n if data_format == 'NHWC':\n offset_idx *= c\n oh = eh // sh # out height\n ow = ew // sw # out width\n else:\n assert False # Can't be here.\n\n # 4) Combine block start/offset pairs.\n # shape = [(eh // sh) * (ew // sw), fh * fw * c]\n idx = _cartesian_add([offset_idx, start_idx])\n new_shape = [oh, ow, fh * fw * c]\n new_shape = prefer_static.concat([batch_shape, new_shape], axis=0)\n return idx, new_shape",
"def _GetBatchIndices(params_shape, indices, batch_dims):\n batch_indices = indices\n indices_dtype = indices.dtype.base_dtype\n casted_params_shape = math_ops.cast(params_shape, indices_dtype)\n accum_dim_value = array_ops.ones((), dtype=indices_dtype)\n for dim in range(batch_dims, 0, -1):\n dim_value = casted_params_shape[dim - 1]\n accum_dim_value *= casted_params_shape[dim]\n start = array_ops.zeros((), dtype=indices_dtype)\n step = array_ops.ones((), dtype=indices_dtype)\n dim_indices = math_ops.range(start, dim_value, step)\n dim_indices *= accum_dim_value\n dim_shape = array_ops.concat([\n array_ops.tile([1], [dim - 1]), [dim_value],\n array_ops.tile([1], [array_ops.rank(indices) - dim])\n ], axis=0)\n batch_indices += array_ops.reshape(dim_indices, dim_shape)\n\n return batch_indices",
"def minibatches(data, labels, batch_size=1000):\n assert data.shape[0] == len(labels)\n indices = np.random.permutation(data.shape[0])\n data = data[indices, :]\n labels = labels[indices]\n for batch in np.arange(0, data.shape[0], batch_size):\n if batch + batch_size > data.shape[0]: # if data size does not divide evenly, make final batch smaller\n batch_size = data.shape[0] - batch\n yield (\n data[batch:batch+batch_size,:],\n one_hot(labels[batch:batch+batch_size], 10)\n )",
"def get_batch(iterator, batch_size):\n while True:\n center_batch = np.zeros(batch_size, dtype = np.uint32)\n target_batch = np.zeros((batch_size, 1), dtype = np.uint32)\n for index in range(batch_size):\n center_batch[index], target_batch[index] = next(iterator)\n\n yield center_batch, target_batch",
"def _get_batches_starting_indexes(self):\n\n indexes = numpy.arange(0, self.num_frames, self.recurrence)\n indexes = numpy.random.permutation(indexes)\n\n # Shift starting indexes by self.recurrence//2 half the time\n if self.batch_num % 2 == 1:\n indexes = indexes[(indexes + self.recurrence) % self.num_frames_per_proc != 0]\n indexes += self.recurrence // 2\n self.batch_num += 1\n\n num_indexes = self.batch_size // self.recurrence\n batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)]\n\n return batches_starting_indexes",
"def getMiniBatch(self, batch_size, beta):\r\n assert beta > 0\r\n N = len(self.buffer)\r\n # function to sample via probability of the transactions\r\n indexes = self._sample_proportional(batch_size)\r\n weights = []\r\n batch_transitions = []\r\n sum = self._it_sum.sum()\r\n prob_min = self._it_min.min() / sum\r\n max_weight = (prob_min * N) ** (-beta) # according to PER paper,\r\n # max weight is used to normalize the weights\r\n for idx in indexes:\r\n prob_sample = self._it_sum[idx] / sum\r\n weight = (prob_sample * N) ** (-beta) # fixes the bias high prob transaction introduce\r\n weights.append(weight)\r\n batch_transitions.append(self.buffer[idx])\r\n weights /= np.ones_like(weights) * max_weight # normalize\r\n return batch_transitions, weights, indexes"
]
| [
"0.6666846",
"0.65015984",
"0.636911",
"0.61453336",
"0.6103114",
"0.6084373",
"0.60636634",
"0.5974345",
"0.591578",
"0.5906937",
"0.58807844",
"0.58341247",
"0.5826688",
"0.58113354",
"0.5802759",
"0.5725827",
"0.57240266",
"0.5720992",
"0.56954914",
"0.567487",
"0.5674246",
"0.5634576",
"0.56252724",
"0.5604673",
"0.5583453",
"0.5561818",
"0.5559913",
"0.555551",
"0.5545431",
"0.5536252"
]
| 0.67924124 | 0 |
Returns the version of the userandjobstate service. | def ver(self, context=None):
return self._client.call_method(
'UserAndJobState.ver',
[], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()",
"def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]",
"def version(self):\n response = self._request_call('/version')\n return response.version_etcdserver",
"def version(self):\n return self._client.getVersion()",
"def get_version(self):\n return self.__make_api_call('get/version')",
"def get_version(self):\n pass",
"def version(self):\n return self.get_current_version()",
"def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()",
"def version(self):\n if self._version is None:\n self.version = '{user}-{date}'.format(\n user=getpass.getuser().strip().lower(),\n date=datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d'))\n return self._version",
"async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]",
"def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)",
"def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)",
"def get_version(self):\n return self.version",
"def version(self):\n return self._get(\"version\")",
"def client_version(self) -> str:\n return pulumi.get(self, \"client_version\")",
"def getversion(self):\n return self.__version",
"def get_version(self):\n return version.__version__",
"def get_version(self):\n return version.__version__",
"def get_server_version(self):\n return self.__aceQLHttpApi.get_server_version()",
"def version(self) -> str:\n return pulumi.get(self, \"version\")",
"def version(self) -> str:\n return pulumi.get(self, \"version\")",
"def version(self) -> str:\n return pulumi.get(self, \"version\")",
"def version(self) -> str:\n return pulumi.get(self, \"version\")",
"def getSupervisorVersion(self):\r\n self._update('getSupervisorVersion')\r\n return VERSION",
"def _get_version(self):",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")",
"def get_version(self):\r\n if not self.endpoint_checker(self.endpointurl):\r\n raise Exception(\"Please use a valid ESRI REST url\")\r\n\r\n parsedurl = urlparse(self.endpointurl)\r\n print(f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\")\r\n req = requests.get(\r\n f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\"\r\n )\r\n\r\n if req.status_code == 200:\r\n try:\r\n return req.json()[\"currentVersion\"]\r\n except KeyError:\r\n try:\r\n req = requests.get(\r\n self.endpointurl.split(\"services/\")[0] + \"services/?f=pjson\"\r\n )\r\n return req.json()[\"currentVersion\"]\r\n except Exception as e:\r\n raise e\r\n raise Exception(\r\n f\"An Error occurred retrieving vital information, the response status {str(req.status_code)} associate with {req.json()['error']['message']}\"\r\n )",
"def get_version(self):\n data = self._get('app_version')\n return data['version']"
]
| [
"0.6818767",
"0.6731788",
"0.67285407",
"0.66809475",
"0.66694766",
"0.6649794",
"0.6561813",
"0.65589726",
"0.6523883",
"0.6504586",
"0.6461285",
"0.6447625",
"0.64336187",
"0.64096856",
"0.63993865",
"0.63934135",
"0.6366004",
"0.6366004",
"0.6359067",
"0.6342099",
"0.6342099",
"0.6342099",
"0.6342099",
"0.6329988",
"0.63107604",
"0.630486",
"0.630486",
"0.6298919",
"0.62917763",
"0.6278666"
]
| 0.7967833 | 0 |
Set the state of a key for a service without service authentication. | def set_state(self, service, key, value, context=None):
return self._client.call_method(
'UserAndJobState.set_state',
[service, key, value], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def set_appservice_state(\n self, service: ApplicationService, state: ApplicationServiceState\n ) -> None:\n await self.db_pool.simple_upsert(\n \"application_services_state\", {\"as_id\": service.id}, {\"state\": state.value}\n )",
"def set_key(self, key):\n self.key = key",
"def set_key(self, key):\n\t\tif key in self.control_map:\n\t\t\tindex = self.control_map.index(key)\n\t\t\tcurrent_key = self.control_map[self.option_index]\n\t\t\tself.control_map[index] = current_key\n\t\t\tself.control_map[self.option_index] = key\n\t\t\treturn\n\t\tself.control_map[self.option_index] = key",
"def key(self, key):\n return self.__key.set(key)",
"def set(self, key, value):",
"def set(self, key, value):",
"def key(self, key):\n self._key = key",
"def key(self, key):\n self._key = key",
"async def statset_appkey(self, key):\n self._set_app_key(key)\n await self.bot.say(\"APP key successfully set.\")",
"def set_incremental_state(module, incremental_state, key, value):\n if incremental_state is not None:\n full_key = _get_full_incremental_state_key(module, key)\n incremental_state[full_key] = value",
"def set(self, key):\n if key == 0:\n self._servo.d_key(.1)\n elif key == 1:\n self._servo.ctrl_d(.1)\n elif key == 2:\n self._servo.ctrl_u(.1)\n elif key == 3:\n self._servo.ctrl_enter(.1)\n elif key == 4:\n self._servo.enter_key(.1)\n elif key == 5:\n self._servo.refresh_key(.1)\n elif key == 6:\n self._servo.ctrl_refresh_key(.1)\n elif key == 7:\n self._servo.sysrq_x(.1)\n else:\n raise kbError(\"Unknown key enum: %s\", key)",
"def key(self, key):\n\n self._key = key",
"def key(self, key):\n\n self._key = key",
"def set_state_auth(self, token, key, value, context=None):\n return self._client.call_method(\n 'UserAndJobState.set_state_auth',\n [token, key, value], self._service_ver, context)",
"async def statset_apikey(self, key):\n self._set_api_key(key)\n await self.bot.say(\"API key successfully set.\")",
"def set(self, key, value):\n raise NotImplementedError",
"def service_code(self, service_code):\n \n self._service_code = service_code",
"async def set(self, key, value):\n trace_log(\"PersistantStorage: setting key \", key, \" to value \", value)\n self.dict[key] = value\n #self.log_set(key, value)",
"def set_status(\n self,\n key: str,\n status: TaskStatus,\n error: Optional[ErrorInfo] = None,\n skipped_by: Optional[str] = None,\n ) -> None:\n raise NotImplementedError",
"def __setstate__(self, state):\n if len(state) != 1:\n raise TypeError('Invalid state length, expected 1; received %i' %\n len(state))\n kwargs = state[0]\n if not isinstance(kwargs, dict):\n raise TypeError('Key accepts a dict of keyword arguments as state; '\n 'received %r' % kwargs)\n self.__reference = None\n self.__pairs = tuple(kwargs['pairs'])\n self.__app = kwargs['app']\n self.__namespace = kwargs['namespace']",
"def set(self, key, value, ttl=0):\n pass",
"def state(self, state: str) -> None:\n try:\n self._redis.set(self._namespace(\"state\"), str(state))\n except RedisError:\n self.logger.error(\"RedisError\", exc_info=True)",
"def set(self, key: t.Hashable, value: t.Any) -> None:",
"def set_key(self, key, value) -> True:\n\n if key in self.key_functs:\n # Validate the value\n real_val = self.key_functs[key](value)\n \n if real_val is not None:\n self.key_values[key] = real_val \n self.key_satified[key] = True\n return True\n\n # Invalid key\n return False",
"def setdefault(self, key):\n pass",
"def update_key(self, key):\n self._api_key = key",
"def set_key_id(self, key_id=''):\n self.key_id = key_id",
"def __setstate__(self, state: Dict[str, Any]):\n self.__dict__.update(state)\n self.__dict__['__db'] = None",
"def update(self, key):\n return self.state",
"def setKey(self, key):\n if hasattr(key, '__class__') and issubclass(key.__class__, ObjectKey):\n key = key.getKey()\n\n self.__key = key"
]
| [
"0.6455193",
"0.6354306",
"0.6153753",
"0.5945854",
"0.59108603",
"0.59108603",
"0.59096426",
"0.59096426",
"0.58875173",
"0.5881037",
"0.5849215",
"0.58078116",
"0.58078116",
"0.5740302",
"0.5738482",
"0.5725617",
"0.57177335",
"0.5695139",
"0.5683244",
"0.5679103",
"0.56657284",
"0.5652458",
"0.5637539",
"0.56071764",
"0.5604866",
"0.55784637",
"0.5574023",
"0.5572574",
"0.5561165",
"0.555764"
]
| 0.7107616 | 0 |
Set the state of a key for a service with service authentication. | def set_state_auth(self, token, key, value, context=None):
return self._client.call_method(
'UserAndJobState.set_state_auth',
[token, key, value], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_state(self, service, key, value, context=None):\n return self._client.call_method(\n 'UserAndJobState.set_state',\n [service, key, value], self._service_ver, context)",
"async def set_appservice_state(\n self, service: ApplicationService, state: ApplicationServiceState\n ) -> None:\n await self.db_pool.simple_upsert(\n \"application_services_state\", {\"as_id\": service.id}, {\"state\": state.value}\n )",
"def set_key(self, key):\n self.key = key",
"def set_auth_state(self, data):\n raise NotImplementedError()",
"def set(self, key, value):",
"def set(self, key, value):",
"def set_key(self, key):\n\t\tif key in self.control_map:\n\t\t\tindex = self.control_map.index(key)\n\t\t\tcurrent_key = self.control_map[self.option_index]\n\t\t\tself.control_map[index] = current_key\n\t\t\tself.control_map[self.option_index] = key\n\t\t\treturn\n\t\tself.control_map[self.option_index] = key",
"def service_account(self, service_account):\n\n self._service_account = service_account",
"def api_key_set(self, api_key):\n self.request('/v1.1/auth_key', 'POST', body={'auth_key': api_key})",
"def set(self, key, value):\n self.context.set(self.prefix+'.'+key, value)",
"def service_code(self, service_code):\n \n self._service_code = service_code",
"async def statset_appkey(self, key):\n self._set_app_key(key)\n await self.bot.say(\"APP key successfully set.\")",
"def set_incremental_state(module, incremental_state, key, value):\n if incremental_state is not None:\n full_key = _get_full_incremental_state_key(module, key)\n incremental_state[full_key] = value",
"def set_AuthenticationKey(self, value):\n super(AddressValidationInputSet, self)._set_input('AuthenticationKey', value)",
"def key(self, key):\n self._key = key",
"def key(self, key):\n self._key = key",
"def set(self, key):\n if key == 0:\n self._servo.d_key(.1)\n elif key == 1:\n self._servo.ctrl_d(.1)\n elif key == 2:\n self._servo.ctrl_u(.1)\n elif key == 3:\n self._servo.ctrl_enter(.1)\n elif key == 4:\n self._servo.enter_key(.1)\n elif key == 5:\n self._servo.refresh_key(.1)\n elif key == 6:\n self._servo.ctrl_refresh_key(.1)\n elif key == 7:\n self._servo.sysrq_x(.1)\n else:\n raise kbError(\"Unknown key enum: %s\", key)",
"def key(self, key):\n\n self._key = key",
"def key(self, key):\n\n self._key = key",
"async def statset_apikey(self, key):\n self._set_api_key(key)\n await self.bot.say(\"API key successfully set.\")",
"def service_account(self, service_account: str):\n\n self._service_account = service_account",
"def __setstate__(self, state):\n if len(state) != 1:\n raise TypeError('Invalid state length, expected 1; received %i' %\n len(state))\n kwargs = state[0]\n if not isinstance(kwargs, dict):\n raise TypeError('Key accepts a dict of keyword arguments as state; '\n 'received %r' % kwargs)\n self.__reference = None\n self.__pairs = tuple(kwargs['pairs'])\n self.__app = kwargs['app']\n self.__namespace = kwargs['namespace']",
"async def set(self, key, value):\n trace_log(\"PersistantStorage: setting key \", key, \" to value \", value)\n self.dict[key] = value\n #self.log_set(key, value)",
"def set_service(\n self,\n service_type: str,\n alias: str,\n service_config: Dict,\n save: bool = True,\n config_encrypted: bool = False,\n ):\n # we only validate attributes when they aren't encrypted\n # if we are setting a service from an encrypted file then it has already been validated\n if not config_encrypted:\n self._validate_service(service_type, alias, service_config)\n self._set_service(\n service_type,\n alias,\n service_config,\n save=save,\n config_encrypted=config_encrypted,\n )",
"def key(self, key):\n return self.__key.set(key)",
"def service(self, service):\n \n self._service = service",
"def set(self, key, value):\n raise NotImplementedError",
"def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )",
"def set(self, key, value):\r\n self.set_many({key: value})",
"def setup_service_key():\n if get_var('AFS_AKIMPERSONATE'):\n keytab = get_var('KRB_AFS_KEYTAB')\n if keytab and not os.path.exists(keytab):\n cell = get_var('AFS_CELL')\n realm = get_var('KRB_REALM')\n enctype = get_var('KRB_AFS_ENCTYPE')\n _KeytabKeywords().create_service_keytab(keytab, cell, realm, enctype, akimpersonate=True)\n if get_var('AFS_KEY_FILE') == 'KeyFile':\n run_keyword(\"Create Key File\")\n elif get_var('AFS_KEY_FILE') == 'rxkad.keytab':\n run_keyword(\"Install rxkad-k5 Keytab\")\n elif get_var('AFS_KEY_FILE') == 'KeyFileExt':\n run_keyword(\"Create Extended Key File\", get_var('KRB_AFS_ENCTYPE'))\n else:\n raise AssertionError(\"Unsupported AFS_KEY_FILE! %s\" % (get_var('AFS_KEY_FILE')))"
]
| [
"0.74339426",
"0.65713215",
"0.63565004",
"0.60883474",
"0.59891087",
"0.59891087",
"0.59362435",
"0.59221303",
"0.591146",
"0.59027886",
"0.5892569",
"0.58422506",
"0.5821213",
"0.58127016",
"0.58119357",
"0.58119357",
"0.5793126",
"0.5768279",
"0.5768279",
"0.5765366",
"0.57576925",
"0.5752617",
"0.5734117",
"0.57197726",
"0.5719102",
"0.570187",
"0.56609094",
"0.5654563",
"0.5650976",
"0.5646783"
]
| 0.69348943 | 1 |
Create a new job status report. create_job2 | def create_job(self, context=None):
return self._client.call_method(
'UserAndJobState.create_job',
[], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_job2(self, params, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_job2',\n [params], self._service_ver, context)",
"def create_report(self, report_job: dict):\n try:\n # Run the report and wait for it to finish\n report_job_id = self.report_downloader.WaitForReport(report_job)\n return report_job_id\n except errors.AdManagerReportError as e:\n print('[INFO]: Failed to generate report. Error: %s' % e)\n sys.exit()",
"def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % str(api_response.status))\n return api_response",
"async def createStatus(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"createStatus\"], *args, **kwargs)",
"def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job",
"def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")",
"def send_job(self):\n graph = self.processgraphEdit.toPlainText()\n # info(self.iface, graph)\n response = self.connection.job_create(json.loads(graph))\n if response.status_code == 201:\n info(self.iface, \"Successfully created new job, Response: {}\".format(response.status_code))\n else:\n warning(self.iface, \"Not able to created new job, Response: {}\".format(str(response.json())))",
"def create_job(self, job):\n call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.CREATE_JOB, job.name], stdin=subprocess.PIPE)\n out, err = call.communicate(input=platform_ci.jjb.get_job_as_xml(job, self.template_dir))\n call.wait()\n if call.returncode != 0:\n logging.info(out)\n logging.error(err)\n raise PlatformJenkinsException(\"Creating job failed: \" + job.name)",
"def record_task_status(status, job_name, task_id, version, task_info, print_progress=False):\n cjrdb_conn = CJRDBConnection()\n if cjrdb_conn is None:\n raise Exception(\"Could not create the connection object...\")\n cjrdb_conn.create_db_tables()\n\n if status == JobStatus.START:\n record_task_start(job_name, task_id, version, task_info, cjrdb_conn, print_progress)\n elif status == JobStatus.FINISH:\n record_task_finish(job_name, task_id, version, task_info, cjrdb_conn, print_progress)\n elif status == JobStatus.UPDATE:\n record_task_update(job_name, task_id, version, task_info, cjrdb_conn, print_progress)\n else:\n raise Exception(\"Do not recognise the status inputted.\")",
"def test_job_exists():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'job_status': 'submitted'})\n exists = Status.job_exists(STATUS_DIR, 'test1')\n assert exists",
"def test_create_status(self):\n self.basic_login()\n cassette_name = self.cassette_name('create_status')\n with self.recorder.use_cassette(cassette_name):\n repository = self.gh.repository('sigmavirus24', 'github3.py')\n assert repository is not None\n deployment = find(lambda d: d.id == 801,\n repository.iter_deployments())\n assert deployment is not None\n status = deployment.create_status('success')\n\n assert isinstance(status, github3.repos.deployment.DeploymentStatus)",
"def create(self, cr, uid, vals, context=None):\n vals.update({'ref': self.pool.get('ir.sequence').get(\n cr, uid, 'maintenance.job')})\n return super(maintenance_job, self).create(cr, uid, vals, context=context)",
"def test_job_addition():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.add_job(STATUS_DIR, 'generation', 'test1')\n status1 = Status(STATUS_DIR).data['generation']['test1']['job_status']\n\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'job_status': 'finished',\n 'additional': 'test'})\n status2 = Status(STATUS_DIR).data['generation']['test1']['job_status']\n\n assert status2 == status1",
"def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):\n\n job_detail = JobDetail(company_name = company_name, job_title = job_title, application_deadline = application_deadline, job_listing_url = job_listing_url, state = state , city = city, application_listed = application_listed, salary = salary)\n db.session.add(job_detail)\n db.session.commit()\n\n return job_detail",
"def create_job(jobtype, server):\n name = generate_job_name(jobtype)\n job = Job.objects.create(jobtype=jobtype, server=server, name=name)\n return job",
"def test_make_file():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.make_job_file(STATUS_DIR, 'generation', 'test1', TEST_1_ATTRS_1)\n status = Status.retrieve_job_status(STATUS_DIR, 'generation', 'test1')\n msg = 'Failed, status is \"{}\"'.format(status)\n assert status == 'R', msg",
"def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))",
"def create_call_status(job, internal_storage):\n monitoring_backend = job.config['lithops']['monitoring']\n Status = getattr(lithops.worker.status, '{}CallStatus'\n .format(monitoring_backend.capitalize()))\n return Status(job, internal_storage)",
"def test_create_job(self):\n engine = Engine(self.config_file, self.api_token)\n\n engine.create_job()\n\n assert engine.ingest_job_id == 23",
"def test_post_job(self):\n body = UnitTesterJobCreateReq()\n response = self.client.open(\n '/v1/job',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def create_and_start_job(self, token, status, desc, progress, est_complete, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_and_start_job',\n [token, status, desc, progress, est_complete], self._service_ver, context)",
"def create(self, resource, **data):\n body = ''\n if resource == 'robot/job':\n body = data['body']\n else:\n body = urllib.urlencode(data)\n\n return self.request('/' + resource, 'POST', body=body)",
"def __create_at_job(self, command, detail=''):\n\t\tstarted = int(time())\n\t\tlogfile = self._current_job['logfile']\n\t\tlines = self._current_job['lines']\n\t\tscript = '''\n#:started: %s\n#:detail: %s\n#:logfile: %s\n#:lines: %s\n#:command: %s\n/usr/share/univention-updater/disable-apache2-umc\n%s < /dev/null\n/usr/share/univention-updater/enable-apache2-umc --no-restart\n''' % (started,detail,logfile,lines,command,command)\n\t\tp1 = subprocess.Popen( [ 'LC_ALL=C at now', ], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True )\n\t\t(stdout,stderr) = p1.communicate( script )\n\n\t\tif p1.returncode != 0:\n\t\t\treturn (p1.returncode,stderr)\n\t\telse:\n\t\t\treturn (p1.returncode,stdout)",
"def create(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.createJobs(server, jobs)",
"def job(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n job = dashboard.get_job_information(wf_id, job_id, job_instance_id)\n job_states = dashboard.get_job_states(wf_id, job_id, job_instance_id)\n job_instances = dashboard.get_job_instances(wf_id, job_id)\n\n previous = None\n\n for state in job_states:\n timestamp = state.timestamp\n state.timestamp = datetime.fromtimestamp(state.timestamp).strftime('%a %b %d, %Y %I:%M:%S %p')\n\n if previous is None:\n state.interval = 0.0\n else:\n state.interval = timestamp - previous\n\n previous = timestamp\n\n if not job:\n return 'Bad Request', 400\n\n return render_template('workflow/job/job_details.html', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job_id, job=job,\n job_instances=job_instances, job_states=job_states)",
"def upcreate_training_status(\n project_id,\n status: str,\n log: str,\n performance: str = \"{}\",\n need_to_send_notification: bool = False,\n):\n logger.info(\"Updating Training Status :%s\", status)\n logger.info(\"Updating Training Log :%s\", log)\n logger.info(\"need_to_send_notification :%s\", need_to_send_notification)\n\n training_status_object = TrainingStatus.objects.get(project_id=project_id)\n training_status_object.status = status\n training_status_object.log = log.capitalize()\n training_status_object.performance = performance\n training_status_object.need_to_send_notification = need_to_send_notification\n training_status_object.save()",
"def setup_jobs_for_status_check(cls, sess, submission_id):\n job_values = {\n 'uploadFinished': [FILE_TYPE_DICT['award'], JOB_STATUS_DICT['finished'],\n JOB_TYPE_DICT['file_upload'], None, None, None],\n 'recordRunning': [FILE_TYPE_DICT['award'], JOB_STATUS_DICT['running'],\n JOB_TYPE_DICT['csv_record_validation'], None, None, None],\n 'awardFin': [FILE_TYPE_DICT['award_financial'], JOB_STATUS_DICT['ready'],\n JOB_TYPE_DICT['csv_record_validation'], \"awardFin.csv\", 100, 100],\n 'appropriations': [FILE_TYPE_DICT['appropriations'], JOB_STATUS_DICT['ready'],\n JOB_TYPE_DICT['csv_record_validation'], \"approp.csv\", 2345, 567],\n 'program_activity': [FILE_TYPE_DICT['program_activity'], JOB_STATUS_DICT['ready'],\n JOB_TYPE_DICT['csv_record_validation'], \"programActivity.csv\", None, None],\n 'cross_file': [None, JOB_STATUS_DICT['finished'], JOB_TYPE_DICT['validation'], 2, None, None, None]\n }\n job_id_dict = {}\n approp_job = None\n\n for job_key, values in job_values.items():\n job = FileTests.insert_job(\n sess,\n filetype=values[0],\n status=values[1],\n type_id=values[2],\n submission=submission_id,\n filename=values[3],\n file_size=values[4],\n num_rows=values[5]\n )\n job_id_dict[job_key] = job.job_id\n if job_key == 'appropriations':\n approp_job = job\n elif job_key == 'cross_file':\n cross_file_job = job\n\n # For appropriations job, create an entry in file for this job\n file_rec = File(\n job_id=job_id_dict[\"appropriations\"],\n filename=\"approp.csv\",\n file_status_id=FILE_STATUS_DICT['complete'],\n headers_missing=\"missing_header_one, missing_header_two\",\n headers_duplicated=\"duplicated_header_one, duplicated_header_two\")\n sess.add(file_rec)\n\n cross_file = File(\n job_id=job_id_dict[\"cross_file\"],\n filename=\"approp.csv\",\n file_status_id=FILE_STATUS_DICT['complete'],\n headers_missing=\"\",\n headers_duplicated=\"\")\n sess.add(cross_file)\n\n # Put some entries in error data for approp job\n rule_error = ErrorMetadata(\n job_id=job_id_dict[\"appropriations\"],\n filename=\"approp.csv\",\n field_name=\"header_three\",\n error_type_id=ERROR_TYPE_DICT['rule_failed'],\n occurrences=7,\n rule_failed=\"Header three value must be real\",\n original_rule_label=\"A1\",\n file_type_id=FILE_TYPE_DICT['appropriations'],\n target_file_type_id=FILE_TYPE_DICT['award'],\n severity_id=RULE_SEVERITY_DICT['fatal']\n )\n approp_job.number_of_errors += 7\n sess.add(rule_error)\n\n warning_error = ErrorMetadata(\n job_id=job_id_dict[\"appropriations\"],\n filename=\"approp.csv\",\n field_name=\"header_three\",\n error_type_id=ERROR_TYPE_DICT['rule_failed'],\n occurrences=7,\n rule_failed=\"Header three value looks odd\",\n original_rule_label=\"A2\",\n file_type_id=FILE_TYPE_DICT['appropriations'],\n target_file_type_id=FILE_TYPE_DICT['award'],\n severity_id=RULE_SEVERITY_DICT['warning']\n )\n approp_job.number_of_warnings += 7\n sess.add(warning_error)\n\n req_error = ErrorMetadata(\n job_id=job_id_dict[\"appropriations\"],\n filename=\"approp.csv\",\n field_name=\"header_four\",\n error_type_id=ERROR_TYPE_DICT['required_error'],\n occurrences=5,\n rule_failed=\"This field is required for all submissions but was not provided in this row.\",\n severity_id=RULE_SEVERITY_DICT['fatal']\n )\n approp_job.number_of_errors += 5\n sess.add(req_error)\n\n cross_error = ErrorMetadata(\n job_id=job_id_dict[\"cross_file\"],\n filename=\"approp.csv\",\n field_name=\"header_four\",\n error_type_id=ERROR_TYPE_DICT['required_error'],\n occurrences=5,\n rule_failed=\"This field is required for all 
submissions but was not provided in this row.\",\n file_type_id=FILE_TYPE_DICT['appropriations'],\n target_file_type_id=FILE_TYPE_DICT['award'],\n severity_id=RULE_SEVERITY_DICT['fatal']\n )\n cross_file_job.number_of_errors += 5\n sess.add(cross_error)\n\n sess.commit()\n return job_id_dict",
"def test_sample_status_custom(self):\n self.app = self.make_app(argv = ['report', 'sample_status', self.examples[\"project\"], self.examples[\"flowcell\"], '--debug', '--customer_reference', 'MyCustomerReference', '--uppnex_id', 'MyUppnexID', '--ordered_million_reads', '10', '--phix', '{1:0.1, 2:0.2}'],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n s_param_map = {x[\"scilifelab_name\"]:x for x in data[\"s_param\"]}\n self.assertEqual(s_param_map['P001_101_index3']['uppnex_project_id'], 'MyUppnexID')\n self.assertEqual(s_param_map['P001_101_index3']['customer_reference'], 'MyCustomerReference')\n self.assertEqual(s_param_map['P001_101_index3']['ordered_amount'], 10)",
"def testDuplicateJobReports(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n locationAction = self.daoFactory(classname=\"Locations.New\")\n locationAction.execute(\"site1\", pnn=\"T2_CH_CERN\")\n\n testWorkflow = Workflow(spec=self.specUrl, owner=\"Steve\",\n name=\"wf001\", task=self.taskName)\n testWorkflow.create()\n testFileset = Fileset(name=\"TestFileset\")\n testFileset.create()\n\n testFile = File(lfn=\"SomeLFNC\", locations=set([\"T2_CH_CERN\"]))\n testFile.create()\n testFileset.addFile(testFile)\n testFileset.commit()\n\n testSubscription = Subscription(fileset=testFileset,\n workflow=testWorkflow)\n testSubscription.create()\n\n splitter = SplitterFactory()\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=testSubscription)\n jobGroup = jobFactory(files_per_job=1)[0]\n\n assert len(jobGroup.jobs) == 1, \\\n \"Error: Splitting should have created one job.\"\n\n testJobA = jobGroup.jobs[0]\n testJobA[\"user\"] = \"sfoulkes\"\n testJobA[\"group\"] = \"DMWM\"\n testJobA[\"taskType\"] = \"Processing\"\n\n change.propagate([testJobA], 'created', 'new')\n myReport = Report()\n reportPath = os.path.join(getTestBase(),\n \"WMCore_t/JobStateMachine_t/Report.pkl\")\n myReport.unpersist(reportPath)\n testJobA[\"fwjr\"] = myReport\n\n change.propagate([testJobA], 'executing', 'created')\n change.propagate([testJobA], 'executing', 'created')\n\n changeStateDB = self.couchServer.connectDatabase(dbname=\"changestate_t/fwjrs\")\n allDocs = changeStateDB.document(\"_all_docs\")\n\n self.assertEqual(len(allDocs[\"rows\"]), 2,\n \"Error: Wrong number of documents\")\n\n for resultRow in allDocs[\"rows\"]:\n if resultRow[\"id\"] != \"_design/FWJRDump\":\n changeStateDB.document(resultRow[\"id\"])\n break\n\n return",
"def t_status_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In status process...\")\n\n d_state = self.job_state(*args, **kwargs)\n\n d_ret = d_state['d_ret']\n b_status = d_state['status']\n\n l_keys = d_ret.items()\n l_status = []\n for i in range(0, int(len(l_keys)/2)):\n b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]\n try:\n endcode = d_ret['%s.end' % str(i)]['returncode'][0]\n except:\n endcode = None\n\n if endcode == None and b_startEvent:\n l_status.append('started')\n if not endcode and b_startEvent and type(endcode) is int:\n l_status.append('finishedSuccessfully')\n if endcode and b_startEvent:\n l_status.append('finishedWithError')\n\n self.dp.qprint('b_startEvent = %d' % b_startEvent)\n self.dp.qprint(endcode)\n self.dp.qprint('l_status = %s' % l_status)\n\n d_ret['l_status'] = l_status\n return {\"d_ret\": d_ret,\n \"status\": b_status}"
]
| [
"0.64179003",
"0.6149524",
"0.61436284",
"0.60143",
"0.59219",
"0.59107804",
"0.58727473",
"0.5780395",
"0.5730901",
"0.5729058",
"0.5695275",
"0.5693628",
"0.5683138",
"0.56777763",
"0.5668688",
"0.56442016",
"0.5640068",
"0.5640025",
"0.5622313",
"0.55997634",
"0.55959255",
"0.5589186",
"0.5582213",
"0.55798084",
"0.55775446",
"0.55425334",
"0.5535583",
"0.5502041",
"0.54975855",
"0.546902"
]
| 0.6266893 | 1 |
Update the status and progress for a job. | def update_job_progress(self, job, token, status, prog, est_complete, context=None):
return self._client.call_method(
'UserAndJobState.update_job_progress',
[job, token, status, prog, est_complete], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(job_title, progress):\n \n length = 20 # modify this to change the length\n block = int(round(length*progress))\n msg = \"\\r{0}: [{1}] {2}%\".format(job_title, \"#\"*block + \"-\"*(length-block), round(progress*100, 2))\n if progress >= 1: msg += \" DONE\\r\\n\"\n sys.stdout.write(msg)\n sys.stdout.flush()",
"def update_job_state(self, job):",
"def update(self, message=\"\"):\n\n old_message = self.status\n self.status = message\n\n # The change in progress since last update\n delta = self.progress - self.old_progress\n\n if not self.parent.simple_tui:\n # Update the progress bar\n # `start_task` called everytime to ensure progress is remove from indeterminate state\n self.parent.rich_progress_bar.start_task(self.task_id)\n self.parent.rich_progress_bar.update(\n self.task_id,\n description=escape(self.song_name),\n message=message,\n completed=self.progress,\n )\n\n # If task is complete\n if self.progress == 100 or message == \"Error\":\n self.parent.overall_completed_tasks += 1\n self.parent.rich_progress_bar.remove_task(self.task_id)\n else:\n # If task is complete\n if self.progress == 100 or message == \"Error\":\n self.parent.overall_completed_tasks += 1\n\n # When running web ui print progress\n # only one time when downloading/converting/embedding\n if self.parent.web_ui and old_message != self.status:\n logger.info(\"%s: %s\", self.song_name, message)\n elif not self.parent.web_ui and delta:\n logger.info(\"%s: %s\", self.song_name, message)\n\n # Update the overall progress bar\n if self.parent.song_count == self.parent.overall_completed_tasks:\n self.parent.overall_progress = self.parent.song_count * 100\n else:\n self.parent.overall_progress += delta\n\n self.parent.update_overall()\n self.old_progress = self.progress\n\n if self.parent.update_callback:\n self.parent.update_callback(self, message)",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def _update(self):\n _logme.log('Updating job.', 'debug')\n self._updating = True\n if self.done or not self.submitted:\n self._updating = False\n return\n self.queue.update()\n if self.id:\n queue_info = self.queue[self.id]\n if queue_info:\n assert self.id == queue_info.id\n self.queue_info = queue_info\n self.state = self.queue_info.state\n if self.state == 'completed':\n if not self._got_exitcode:\n self.get_exitcode()\n if not self._got_times:\n self.get_times()\n self._updating = False",
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def _update(self, data):\n self.status = data['status']\n self.progress = data['progress']",
"def update_job_status(jid, new_status):\n rd.hset(_generate_job_key(jid), 'status', new_status)",
"def update_job_status(jid, new_status):\n jrd.hset(_generate_job_key(jid), 'status', new_status)",
"def on_job_update(_job):\n nonlocal job\n job = _job",
"def on_job_update(_job):\n nonlocal job\n job = _job",
"def on_job_update(_job):\n nonlocal job\n job = _job",
"def on_job_update(_job):\n nonlocal job\n job = _job",
"def queueStatus(self, job):\n self.status_pool.apply_async(self.statusJob, (job,))",
"def _update_status(self, status: dict):\n with generate_retry_session() as session:\n session.headers.update({\n 'Authorization': 'Bearer {}'.format(self.platform_auth_token)\n })\n url = '{}/training/definitions/{}/jobs/{}/status'.format(\n ORGANIZATION_ENDPOINT, self.job_definition_name, self.training_job_id)\n res = session.put(url, json=status)\n res.raise_for_status()",
"def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? \n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. (not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... 
hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)",
"def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'job that canceled immediately after submission has wrong '\n 'state `%s`' % job.state\n )",
"def update_progress(self, progress, message):\n assert 0 <= progress < 100\n self._progress = int(progress)\n self.logger.info(\n \"status: STARTED %d%% %s\", self._progress, message or \"\"\n )\n self._callback('on_progress_update', self._progress, message)\n return self.update_response(\n self.encoder.encode_started(self._progress, message)\n )",
"def job_status(self, job_status):\n\n self._job_status = job_status",
"def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')",
"def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')",
"def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')",
"def updateRcloneJobStatus():\n global jobIds, jobStatusGauge\n\n # Check if the jobs are running, update the variables\n for jobName, jobId in jobIds.items():\n jobIsRunning = getRcloneJobRunning(jobId)\n jobIds[jobName] = jobId if jobIsRunning else None\n jobStatusGauge.labels(rclone_job=jobName).set(1 if jobIsRunning else 0)",
"def on_job_update(_job):\n nonlocal job, job_update_counter\n\n # Cancel the job when it updates in the `WORKING` state for the\n # second time. We do it just to be sure it is somewhere in the\n # middle of execution.\n if (job is not None and\n _job.state == job.state == 'WORKING'):\n my_job_async_gen.job_manager_class.cancel(job.id)\n\n job = _job\n job_update_counter += 1",
"def updateStatus(self, status):\n pass",
"def update_job(self, job, token, status, est_complete, context=None):\n return self._client.call_method(\n 'UserAndJobState.update_job',\n [job, token, status, est_complete], self._service_ver, context)",
"def __set_job_status(self, job: Job):\n\n self.redis_client.set(f'jobstatus:{job.id}:{str(job.status)}', f'job:{job.id}')"
]
| [
"0.7460748",
"0.7383144",
"0.7383144",
"0.7323013",
"0.7220424",
"0.71709543",
"0.7023524",
"0.69934624",
"0.69273424",
"0.6905885",
"0.6831107",
"0.6815869",
"0.6808671",
"0.6808671",
"0.6808671",
"0.6808671",
"0.67737204",
"0.6665827",
"0.6663416",
"0.66358966",
"0.66191095",
"0.6611759",
"0.65835553",
"0.65835553",
"0.65835553",
"0.657622",
"0.657386",
"0.65571237",
"0.65522903",
"0.65497255"
]
| 0.7565707 | 0 |
Get the description of a job. | def get_job_description(self, job, context=None):
return self._client.call_method(
'UserAndJobState.get_job_description',
[job], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def describe_text_translation_job(JobId=None):\n pass",
"def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()",
"def describe_labeling_job(LabelingJobName=None):\n pass",
"def long_description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"long_description\")",
"def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)",
"def get_description(self):\n return self.bot_data_file[\"description\"]",
"def job(self) -> str:\n return self._job",
"def job(self) -> str:\n return self._job",
"def describe_training_job(TrainingJobName=None):\n pass",
"def describe_compilation_job(CompilationJobName=None):\n pass",
"def get_description(self):\n try:\n long_desc = self.__data[\"descriptions\"][\"MM - \" + self.__name][\"text\"].replace(\"<p>\", \"\").split('</p>')[0]\n return long_desc\n except:\n return None",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"description\")",
"def job_subtitle(self, job):\n return str(job)[:max(8, self._project_min_len_unique_id())]",
"def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details",
"def description(self) -> str:\n return pulumi.get(self, \"description\")",
"def description(self) -> str:\n return pulumi.get(self, \"description\")",
"def description(self) -> str:\n return pulumi.get(self, \"description\")",
"def description(self) -> str:\n return pulumi.get(self, \"description\")"
]
| [
"0.7607637",
"0.7192499",
"0.67830104",
"0.6780596",
"0.6714741",
"0.6707569",
"0.6679356",
"0.66656226",
"0.66656226",
"0.66567254",
"0.66285944",
"0.65982926",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.65800416",
"0.6578058",
"0.65775114",
"0.65399903",
"0.65399903",
"0.65399903",
"0.65399903"
]
| 0.83598095 | 0 |
Get the status of a job. | def get_job_status(self, job, context=None):
return self._client.call_method(
'UserAndJobState.get_job_status',
[job], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)",
"def job_status(self, job_id):\n\n response = self.batch_client.describe_jobs(jobs=[job_id])\n return response[\"jobs\"][0][\"status\"]",
"def get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None",
"def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})",
"def status(self):\n return self.job_proto.status",
"def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)",
"def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text",
"def get_async_job_status(self, job_id, batch=False):\n path = '%s' % job_id\n return self.make_request(path, 'GET', batch=batch)",
"def get_job_status(self):\n if self.worker_thread is None:\n return None\n else:\n return self.worker_thread.get_status()",
"def GetJobStatus(self, job_id):\n return self._SendRequest(HTTP_GET,\n \"/%s/jobs/%s\" % (GANETI_RAPI_VERSION, job_id),\n None, None)",
"def jobStatus(self, jobId):\n params = {'id': jobId}\n try:\n resp = self.gc.get(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. invalid job id:', jobId)\n return {}\n raise\n\n if not resp:\n return ''\n\n status = resp.get('status')\n\n statusStr = JobUtils.getJobStatusStr(status)\n return statusStr",
"async def status(self) -> JobStatus:\n async with self._redis.pipeline(transaction=True) as tr:\n tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]\n is_complete, is_in_progress, score = await tr.execute()\n\n if is_complete:\n return JobStatus.complete\n elif is_in_progress:\n return JobStatus.in_progress\n elif score:\n return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued\n else:\n return JobStatus.not_found",
"def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']",
"def get_job_status(jobid, wait=30):\n cmd = \"scontrol show job {0}\".format(jobid)\n try:\n output = subprocess.check_output(cmd, shell=True)\n m = re.search(\"JobState=(\\w+)\", output)\n except subprocess.CalledProcessError:\n m = False\n\n status = None\n if m:\n status = m.group(1)\n else:\n repeat = 0\n while not m and repeat < wait:\n cmd = \"sacct -b -j {0}\".format(jobid)\n output = subprocess.check_output(cmd, shell=True)\n m = re.search(\"{0}\\s+([A-Z]+)\".format(jobid), output)\n time.sleep(1)\n repeat += 1\n if m:\n status = m.group(1)\n\n if status is None:\n raise ValueError(\"Job not found: {0}\".format(jobid))\n else:\n return status",
"def get_status(job_key):\n job = Job.fetch(job_key, connection=conn)\n\n logs_url = \"{}{}/runner/logs/{}\".format(request.url_root, API_VERSION, job_key)\n status_dict = {\"status\": \"\", \"logs_url\": logs_url}\n return_code = 200\n if job.is_finished:\n status_dict['status'] = \"success\"\n return_code = 200\n elif job.is_failed:\n status_dict['status'] = \"terminal\"\n return_code = 400\n else:\n status_dict['status'] = \"running\"\n status_dict['logs_url'] = \"\"\n return_code = 202\n\n return jsonify(status_dict), return_code",
"def request_status(job_id):\n status = _database_operations.get_status(job_id, Session())\n if status is None:\n flask.abort(404)\n else:\n return json.dumps({\n 'status': status.status,\n 'finished': status.finished\n })",
"def status(self) -> str:\n return self._check_job_status()",
"def get_oozie_status(self, job_id):\n self.echo('Checking status...')\n status = self.call_return(\"oozie job -oozie \" + self.pylot_cfg.hdfs_oozie_interface + \" -info \" + job_id + \" | grep 'Status' | grep ':' | awk '{print $NF}'\")\n status = status.strip('\\n')\n return status",
"def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")",
"def check_job_status(self, jobid=None):\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n else:\n jobid = self.current_job\n\n response = self._request(\n 'GET', CosmoSim.QUERY_URL + '/{}'.format(jobid) + '/phase',\n auth=(self.username, self.password), data={'print': 'b'},\n cache=False)\n\n log.info(\"Job {}: {}\".format(jobid, response.content))\n return response.content",
"def status(self, job_id: str) -> dict:\n session = self._session()\n response = session.get(self._status_url(job_id))\n if response.ok:\n fields = [\n 'status', 'message', 'progress', 'createdAt', 'updatedAt', 'request',\n 'numInputGranules'\n ]\n status_subset = {k: v for k, v in response.json().items() if k in fields}\n return {\n 'status': status_subset['status'],\n 'message': status_subset['message'],\n 'progress': status_subset['progress'],\n 'created_at': dateutil.parser.parse(status_subset['createdAt']),\n 'updated_at': dateutil.parser.parse(status_subset['updatedAt']),\n 'request': status_subset['request'],\n 'num_input_granules': int(status_subset['numInputGranules']),\n }\n else:\n response.raise_for_status()",
"def job_status(self) -> JobStatus:\n statuses = set()\n with self._jobs.lock:\n\n # No jobs present\n if not self._jobs:\n return JobStatus.DONE\n\n statuses = set()\n for job in self._jobs.values():\n if job:\n statuses.add(job.status())\n\n # If any jobs are in non-DONE state return that state\n for stat in [\n JobStatus.ERROR,\n JobStatus.CANCELLED,\n JobStatus.RUNNING,\n JobStatus.QUEUED,\n JobStatus.VALIDATING,\n JobStatus.INITIALIZING,\n ]:\n if stat in statuses:\n return stat\n\n return JobStatus.DONE",
"def get_status(self, refresh: bool = True) -> JobStatus:\n if refresh:\n status = self.connection.hget(self.key, 'status')\n self._status = as_text(status) if status else None\n return self._status",
"def job_status(bot, update, args, job_queue, chat_data):\n if len(args) == 0:\n update.message.reply_text('No parameter provided')\n return\n\n job_name = args[0]\n if job_name not in settings.JOBS:\n update.message.reply_text(\n 'Sorry {0} is not a valid job'.format(job_name))\n return\n\n job = find_job(job_name, job_queue)\n\n if not job:\n update.message.reply_text('{0} job is not running'.format(job_name))\n return\n\n update.message.reply_text('{0} job is running'.format(job_name))",
"def job_status(job_id):\n job_db = JobDb()\n job = job_db.get_job_by_id(job_id)\n job_db.close()\n\n if job is None:\n raise ApiError(\n \"job_not_found\",\n f\"Job '{job_id}' not found\",\n 404)\n\n job['duration'] = str(datetime.timedelta(\n seconds=int((job['updated'] - job['created']).total_seconds())))\n return jsonify(job)",
"def get_job_status(self, mission):\n\n # initialize task status\n status = dict(active=0, running=0, succeeded=0, failed=0)\n\n # get job status if it exists. Otherwise, return N/A\n try:\n the_job = self.batch_client.job.get(job_id=mission.job_name)\n\n # get counts of tasks in different statuses\n status_counts = self.batch_client.job.get_task_counts(mission.job_name)\n except azure.batch.models.BatchErrorException as err:\n if err.message.value.startswith(\"The specified job does not exist\"):\n return \"N/A\", status\n # raise an exception for other kinds of errors\n raise\n\n # update the dictionary\n status[\"active\"] = status_counts.active\n status[\"running\"] = status_counts.running\n status[\"succeeded\"] = status_counts.succeeded\n status[\"failed\"] = status_counts.failed\n\n return the_job.state.name, status",
"def check_status(self, job_id, config_id=1):\n response = self.do_request(\n self.base_url +\n \"/oasis/statusAsync/\" +\n str(config_id) + \"/\" +\n str(job_id) + \"/\"\n )\n return response",
"def __get_job_status_from_queue__(self):\n\n return (lambda job: (int(job[-1]['JobStatus']),\n job[-1]))(self.schedd.query(\"ClusterId =?= {0}\".format(self.id)))",
"def result(self, job):\n\n assert isinstance(job, six.string_types)\n\n try:\n response = requests.get('{}/api/v1/result/{}'.format(self.URL, job))\n except (Timeout, ConnectionError):\n raise ServiceError('Service unavailable: timeout.', 4)\n\n result = self._validate(response)\n data = result.get('state')\n state = State.from_dict(data) if data else None\n\n if state is not None:\n self.__previous_job = self.__current_job\n self.__current_job = None\n\n return result.get('status'), state",
"def get(self, job_id):\n\n if job_id:\n status = {\"state\": self.runner_service.status(job_id)}\n else:\n # TODO: Update the correct status for all jobs; the filtering in jobrunner doesn't work here.\n all_status = self.runner_service.status_all()\n status_dict = {}\n for k, v in all_status.iteritems():\n status_dict[k] = {\"state\": v}\n status = status_dict\n\n self.write_json(status)"
]
| [
"0.8510526",
"0.83282834",
"0.82505345",
"0.8214032",
"0.81855243",
"0.81431603",
"0.7931362",
"0.7918316",
"0.79022324",
"0.7844217",
"0.77791566",
"0.77310485",
"0.767955",
"0.76423264",
"0.76233286",
"0.75556815",
"0.7536898",
"0.7514438",
"0.74993336",
"0.74988097",
"0.7480298",
"0.74580014",
"0.7386342",
"0.7378307",
"0.7375098",
"0.73403215",
"0.7303686",
"0.726318",
"0.72605085",
"0.7204682"
]
| 0.83676547 | 1 |
List jobs. Leave 'services' empty or null to list jobs from all services. list_jobs2 | def list_jobs(self, services, filter, context=None):
return self._client.call_method(
'UserAndJobState.list_jobs',
[services, filter], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_job_services(self, context=None):\n return self._client.call_method(\n 'UserAndJobState.list_job_services',\n [], self._service_ver, context)",
"def get_jobs_list(self, response):\n pass",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def list_jobs(user_data, cache):\n user = cache.ensure_user(user_data)\n\n jobs = []\n for job in cache.get_jobs(user):\n try:\n if job.project_id:\n job.project = cache.get_project(user, job.project_id)\n except IntermittentProjectIdError:\n continue\n\n jobs.append(job)\n\n return result_response(JobListResponseRPC(), {\"jobs\": jobs})",
"def list_service(request):\n builder = http.ResponseBuilder()\n master_addr = request.GET.get('master',None)\n if not master_addr:\n return builder.error('master is required').build_json()\n\n client = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)\n status,jobs = client.list_jobs()\n LOG.info(status)\n if not status:\n return builder.error('fail to list jobs').build_json()\n ret = []\n for job in jobs:\n ret.append(job.__dict__)\n return builder.ok(data=ret).build_json()",
"def ListJobs(self, token=None):\n return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()",
"def listJobs():\n logger.debug('[FLASKWEB /jobs] Request for job listing')\n jobs = db.getJobs(numdays=2)\n for job in jobs:\n job['time'] = datetime.datetime.strptime(job['time'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if job['complete']:\n job['complete'] = datetime.datetime.strptime(job['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n\n # Garbage Collect Orpahened jobs\n compiles = db.getCompiles()\n for compile in compiles:\n if compile['submit']:\n compile['submit'] = datetime.datetime.strptime(compile['submit'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if compile['complete']:\n compile['complete'] = datetime.datetime.strptime(compile['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n # for c in compiles:\n # if c['uid'] not in compile_tasks.keys():\n # db.updateCompile(c['uid'], status='KILLED', done=True)\n # compiles = db.getCompiles()\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200\n else:\n return render_template(\"jobs.html\", joblist=jobs, compilelist=compiles)",
"def list_jobs(arn=None, nextToken=None):\n pass",
"def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))",
"def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())",
"def list(self):\n\n for job_name in self.upstart.get_all_jobs():\n yield self.get_service(job_name)",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()",
"def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n pass",
"def get_jobs(self, type = None):\n joblist = JobList()\n for jobs in self.sm.get_jobs(type = type):\n joblist.add_job(jobs['identifier'], jobs['phase'])\n return joblist.tostring()",
"def list_services(ctx):\n pass",
"def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result",
"def jobs(self, tags=None, tags_intersect=None):\n return list(self.all_jobs(tags=tags, tags_intersect=tags_intersect))",
"def getJobs(**options):\n criteria = search.JobSearch.criteriaFromOptions(**options)\n jobSeq = Cuebot.getStub('job').GetJobs(\n job_pb2.JobGetJobsRequest(r=criteria), timeout=Cuebot.Timeout).jobs\n return [Job(j) for j in jobSeq.jobs]",
"async def request_jobs_list(self, jobs_list_active_only: bool, *args, **kwargs) -> List[str]:\n # TODO: implement\n raise NotImplementedError('{} function \"request_jobs_list\" not implemented yet'.format(self.__class__.__name__))",
"def get_jobs(self, *, params: Optional[dict] = None) -> \"resource_types.Jobs\":\n\n return communicator.Jobs(self.__requester).fetch(parameters=params)",
"def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n res = []\n query = QSqlQuery()\n q = \"select id, hours, price, job from jobs\"\n if bill_id > 0:\n q += \" where b_id=?\"\n q += \" order by id desc\"\n if limit > 0:\n q += \" limit ?\"\n query.prepare(q)\n if bill_id > 0:\n query.addBindValue(bill_id)\n if limit > 0:\n query.addBindValue(limit)\n query.exec_()\n while query.next():\n res.append(_extract_job(query))\n return res",
"def jobs(ctx, page):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n page = page or 1\n try:\n response = PolyaxonClient().experiment.list_jobs(\n user, project_name, _experiment, page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Jobs for experiment `{}`.'.format(_experiment))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment))\n\n objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))\n for o in response['results']]\n objects = list_dicts_to_tabulate(objects)\n if objects:\n Printer.print_header(\"Jobs:\")\n objects.pop('experiment', None)\n dict_tabulate(objects, is_list_dict=True)",
"def get_jobs(k8s_ctx: str, selector: Optional[str] = None, dry_run: bool = False) -> List[str]:\n cmd = 'kubectl --context={k8s_ctx} get jobs -o json'\n if selector is not None:\n cmd += f' -l {selector}'\n if dry_run:\n logging.info(cmd)\n return list()\n\n p = safe_exec(cmd)\n if not p.stdout:\n # a small JSON structure is always returned, even if there are no jobs\n raise RuntimeError('Unexpected lack of output for listing kubernetes jobs')\n out = json.loads(p.stdout.decode())\n return [i['metadata']['name'] for i in out['items']]",
"def list_jobs(exproot, **kwargs):\n for jobname, args, results in load_all(exproot):\n print jobname, args, results",
"def list(self, request):\n jobs = Job.objects.all()\n\n city = self.request.query_params.get('city', None)\n state = self.request.query_params.get('state', None)\n\n # Support filtering jobs by user id\n job = self.request.query_params.get('user', None)\n if job is not None:\n jobs = jobs.filter(user=request.user)\n\n if city is not None:\n jobs = jobs.filter(city=city)\n\n if state is not None:\n jobs = jobs.filter(state=state)\n\n serializer = JobSerializer(\n jobs, many=True, context={'request': request})\n return Response(serializer.data)",
"def get_jobs(self, jobstore=None):\n\n return self._scheduler.get_jobs(jobstore)",
"def _get_jobs():\n return _get_bigquery_service().jobs()",
"def query(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be type list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n req = list()\n if len(jobs) > 1:\n for r in self._batch_request(jobs):\n req.append(\n ''.join([self._scheduler_endpoint, '?', '&'.join(r)]))\n else:\n req = \"{}?job={}\".format(\n self._scheduler_endpoint, jobs[0])\n\n try:\n ret = list()\n for resp in self._api_get(req):\n ret.extend(resp.json())\n return ret\n except HTTPError as e:\n raise JobClientError(e.message)"
]
| [
"0.7235027",
"0.70179933",
"0.67329496",
"0.6717925",
"0.66287684",
"0.65007955",
"0.6481365",
"0.6462476",
"0.64445263",
"0.64408475",
"0.64227414",
"0.64020425",
"0.64020425",
"0.63735825",
"0.6372562",
"0.6354887",
"0.6321302",
"0.6251158",
"0.6241478",
"0.6236662",
"0.6215695",
"0.6148853",
"0.6079671",
"0.60717773",
"0.6016245",
"0.6001984",
"0.5935866",
"0.5921567",
"0.59082586",
"0.5888188"
]
| 0.72507447 | 0 |
List all job services. Note that only services with jobs owned by the user or shared with the user via the default auth strategy will be listed. | def list_job_services(self, context=None):
return self._client.call_method(
'UserAndJobState.list_job_services',
[], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(self):\n\n for job_name in self.upstart.get_all_jobs():\n yield self.get_service(job_name)",
"def list_services(ctx):\n pass",
"def list_service(request):\n builder = http.ResponseBuilder()\n master_addr = request.GET.get('master',None)\n if not master_addr:\n return builder.error('master is required').build_json()\n\n client = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)\n status,jobs = client.list_jobs()\n LOG.info(status)\n if not status:\n return builder.error('fail to list jobs').build_json()\n ret = []\n for job in jobs:\n ret.append(job.__dict__)\n return builder.ok(data=ret).build_json()",
"def list_jobs(self, services, filter, context=None):\n return self._client.call_method(\n 'UserAndJobState.list_jobs',\n [services, filter], self._service_ver, context)",
"def get_services(self):\r\n return get_service_list()",
"def all_services(self):\n services = oc.all_service_names()\n for s in services:\n print(s)\n print(\"#total\", len(services))",
"def list_services(self, **kwargs: Optional[Any]) -> list:\n\n self.logger.debug(\"list_services: %s\", kwargs)\n\n namespace = kwargs.get(\"namespace\", \"global\")\n\n return self.AD.services.list_services(namespace) # retrieve services",
"def services(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/services')",
"def list_services(self):\n response = self._get()\n\n services = []\n for s in response[\"services\"]:\n services.append(_create_service_from_json(s, self._session, self._url_base, s[\"folderName\"]))\n\n return services",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def list_services(self, **params):\n url = 'os-services'\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n schema = self.get_schema(self.schema_versions_info)\n self.validate_response(schema.list_services, resp, body)\n return rest_client.ResponseBody(resp, body)",
"def list_state_services(self, auth, context=None):\n return self._client.call_method(\n 'UserAndJobState.list_state_services',\n [auth], self._service_ver, context)",
"def get(self):\n return UserServices.get_all()",
"def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"def get_all():\n if not SERVICE_DIR:\n raise CommandExecutionError(\"Could not find service directory.\")\n # - List all daemontools services in\n return sorted(os.listdir(SERVICE_DIR))",
"def list(self):\n return self.connection.get(self.service)",
"def service_list():\n data = list_services()\n table = present(lambda: data,\n renderer='table',\n headers=['Service Name', 'URLS', 'Service Type', \"Memory Usages\", 'Replicas', 'Started at',\n 'Updated at',\n 'State', 'Restarts'],\n columns=['name', 'urls', 'service_type', 'memory', 'replicas', 'start_date', 'last_update',\n 'state',\n 'service_restarts'])\n if table:\n click.echo(table)\n else:\n click.echo('\\nYou have no running services right now, why don\\'t you try deploying one? \\n'\n 'have fun and follow the link below:\\n')\n click.echo('https://docs.fandogh.cloud/docs/services.html\\n')",
"def get_services(self):\n\n return list(self.services.values())",
"def list_services(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"services\", \"name\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )",
"def view_all(options, client):\n if options.show_events:\n return display_events(client.events())\n\n return \"\".join([\n display.DisplayServices().format(client.services()),\n '\\n',\n display.DisplayJobs(options).format(client.jobs())\n ])",
"def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))",
"def list_jobs(user_data, cache):\n user = cache.ensure_user(user_data)\n\n jobs = []\n for job in cache.get_jobs(user):\n try:\n if job.project_id:\n job.project = cache.get_project(user, job.project_id)\n except IntermittentProjectIdError:\n continue\n\n jobs.append(job)\n\n return result_response(JobListResponseRPC(), {\"jobs\": jobs})",
"def list(self, request):\n jobs = Job.objects.all()\n\n city = self.request.query_params.get('city', None)\n state = self.request.query_params.get('state', None)\n\n # Support filtering jobs by user id\n job = self.request.query_params.get('user', None)\n if job is not None:\n jobs = jobs.filter(user=request.user)\n\n if city is not None:\n jobs = jobs.filter(city=city)\n\n if state is not None:\n jobs = jobs.filter(state=state)\n\n serializer = JobSerializer(\n jobs, many=True, context={'request': request})\n return Response(serializer.data)",
"def running_services(self) -> List[Callable]:\n return self._running_svcs",
"def services(self) -> List[Service]:\n return self._services",
"def get_service_list():\n service_dict = requests.get('http://consul:8500/v1/catalog/services').json()\n service_list = []\n for s in service_dict:\n service_list.append(s)\n return service_list",
"def getServices(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/catalog'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Service', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result",
"def getServices(self):\n pass",
"def available_services(self) -> list[str]:\r\n return self.services",
"def find_services(self) -> List[str]:\n results = self.collection.distinct(\"process.serviceName\")\n return [result for result in results]"
]
| [
"0.7430709",
"0.71067923",
"0.6972616",
"0.6830034",
"0.67643577",
"0.6763751",
"0.6646712",
"0.66330963",
"0.64929277",
"0.64823115",
"0.64704317",
"0.6468045",
"0.6444789",
"0.63784015",
"0.63213545",
"0.6298812",
"0.62907916",
"0.6273339",
"0.62273616",
"0.6216419",
"0.6211501",
"0.61245185",
"0.6095552",
"0.608746",
"0.6084627",
"0.60680795",
"0.60557044",
"0.60341156",
"0.6012935",
"0.6006638"
]
| 0.8118722 | 0 |
Share a job. Sharing a job to the same user twice or with the job owner has no effect. Attempting to share a job not using the default auth strategy will fail. | def share_job(self, job, users, context=None):
return self._client.call_method(
'UserAndJobState.share_job',
[job, users], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_job(self, job_id):\n ujs = self.__ujs_client()\n ujs.share_job(job_id, [self.nar_user])",
"def share(job):\n client = get_dropbox_client()\n\n try:\n return client.share(\n '/Video Automation Platform/jobs/{job}/{job}.mov'.format(job=job))\n\n except ErrorResponse:\n return False",
"def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})",
"def share_link(cls, user, link):",
"def share_link(cls, user, link):",
"def get_job_shared(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_shared',\n [job], self._service_ver, context)",
"def share(observer, tid):\n try:\n manager = Actions()\n manager.share_task(observer, tid)\n except Exception as e:\n click.echo(e)",
"def share_task(self, observer_uid, tid):\n self.task_controller.share(observer_uid, tid)",
"def shareItem(sharedItem, toRole=None, toName=None, shareID=None,\n interfaces=ALL_IMPLEMENTED):\n warnings.warn(\"Use Role.shareItem() instead of sharing.shareItem().\",\n PendingDeprecationWarning,\n stacklevel=2)\n if toRole is None:\n if toName is not None:\n toRole = getPrimaryRole(sharedItem.store, toName, True)\n else:\n toRole = getEveryoneRole(sharedItem.store)\n return toRole.shareItem(sharedItem, shareID, interfaces)",
"def submit_job(self, data):\n params = data['params']\n # [worker_name, job_id, extranonce2, ntime, nonce]\n # [\"slush.miner1\", \"bf\", \"00000001\", \"504e86ed\", \"b2957c02\"]\n self.logger.debug(\n \"Recieved work submit:\\n\\tworker_name: {0}\\n\\t\"\n \"job_id: {1}\\n\\textranonce2: {2}\\n\\t\"\n \"ntime: {3}\\n\\tnonce: {4} ({int_nonce})\"\n .format(\n *params,\n int_nonce=unpack(str(\"<L\"), unhexlify(params[4]))))\n\n try:\n difficulty, jobid = self.job_mapper[data['params'][1]]\n job = self.net_state['jobs'][jobid]\n except KeyError:\n # stale job\n self.send_error(self.STALE_SHARE)\n # TODO: should really try to use the correct diff\n self.server_state['reject_stale'].incr(self.difficulty)\n return self.STALE_SHARE\n\n header = job.block_header(\n nonce=params[4],\n extra1=self.id,\n extra2=params[2],\n ntime=params[3])\n\n # Check a submitted share against previous shares to eliminate\n # duplicates\n share = (self.id, params[2], params[4])\n if share in job.acc_shares:\n self.logger.info(\"Duplicate share rejected from worker {}.{}!\"\n .format(self.address, self.worker))\n self.send_error(self.DUP_SHARE)\n self.server_state['reject_dup'].incr(difficulty)\n return self.DUP_SHARE\n\n job_target = target_from_diff(difficulty, self.config['diff1'])\n hash_int = self.config['pow_func'](header)\n if hash_int >= job_target:\n self.logger.info(\"Low diff share rejected from worker {}.{}!\"\n .format(self.address, self.worker))\n self.send_error(self.LOW_DIFF)\n self.server_state['reject_low'].incr(difficulty)\n return self.LOW_DIFF\n\n # we want to send an ack ASAP, so do it here\n self.send_success(self.msg_id)\n self.logger.info(\"Valid share accepted from worker {}.{}!\"\n .format(self.address, self.worker))\n # Add the share to the accepted set to check for dups\n job.acc_shares.add(share)\n self.server_state['shares'].incr(difficulty)\n self.celery.send_task_pp('add_share', self.address, difficulty)\n\n # valid network hash?\n if hash_int >= job.bits_target:\n return self.VALID_SHARE\n\n try:\n self.logger.log(35, \"Valid network block identified!\")\n self.logger.info(\"New block at height %i\" % self.net_state['current_height'])\n self.logger.info(\"Block coinbase hash %s\" % job.coinbase.lehexhash)\n block = hexlify(job.submit_serial(header))\n self.logger.log(35, \"New block hex dump:\\n{}\".format(block))\n self.logger.log(35, \"Coinbase: {}\".format(str(job.coinbase.to_dict())))\n for trans in job.transactions:\n self.logger.log(35, str(trans.to_dict()))\n except Exception:\n # because I'm paranoid...\n self.logger.error(\"Unexcpected exception in block logging!\", exc_info=True)\n\n def submit_block(conn):\n retries = 0\n while retries < 5:\n try:\n res = conn.submitblock(block)\n except (CoinRPCException, socket.error, ValueError) as e:\n self.logger.error(\"Block failed to submit to the server {}!\"\n .format(conn.name), exc_info=True)\n self.logger.error(getattr(e, 'error'))\n else:\n if res is None:\n hash_hex = hexlify(\n sha256(sha256(header).digest()).digest()[::-1])\n self.celery.send_task_pp(\n 'add_block',\n self.address,\n self.net_state['current_height'] + 1,\n job.total_value,\n job.fee_total,\n hexlify(job.bits),\n hash_hex)\n self.logger.info(\"NEW BLOCK ACCEPTED by {}!!!\"\n .format(conn.name))\n self.server_state['block_solve'] = int(time())\n break # break retry loop if success\n else:\n self.logger.error(\n \"Block failed to submit to the server {}, \"\n \"server returned {}!\".format(conn.name, res),\n exc_info=True)\n retries += 1\n sleep(1)\n 
self.logger.info(\"Retry {} for connection {}\".format(retries, conn.name))\n for conn in self.net_state['live_connections']:\n # spawn a new greenlet for each submission to do them all async.\n # lower orphan chance\n spawn(submit_block, conn)\n\n return self.BLOCK_FOUND",
"def shareNote(self, authenticationToken, guid):\r\n pass",
"def ensure_share(self, context, share, share_server=None):\n pass",
"def share_with_group_lock(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"share_with_group_lock\")",
"def share_with_group_lock(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"share_with_group_lock\")",
"def claim(self, job, owner):\n raise NotImplementedError()",
"def shareNote(self, authenticationToken, guid):\r\n self.send_shareNote(authenticationToken, guid)\r\n return self.recv_shareNote()",
"def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")",
"def share(self, share):\n if share is None:\n raise ValueError(\"Invalid value for `share`, must not be `None`\") # noqa: E501\n\n self._share = share",
"async def _push_share(self, container, recipient, rights):\n client_url = os.environ.get(\"SWIFT_X_ACCOUNT_SHARING_URL\", None)\n if not client_url:\n logging.log(\n logging.ERROR,\n \"Swift X Account sharing API environment variables %s%s\",\n \"haven't been sourced. Please source the file if it is \",\n \"available, or download a new one from the storage UI.\",\n )\n async with swift_x_account_sharing_bind.SwiftXAccountSharing(\n client_url\n ) as client:\n await client.share_new_access(\n os.environ.get(\"OS_PROJECT_ID\", None),\n container,\n recipient,\n rights,\n self._get_address(),\n )",
"def share_with_group_lock(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"share_with_group_lock\")",
"def modify_share(self, pool, project, share, arg):\n svc = self.share_path % (pool, project, share)\n ret = self.rclient.put(svc, arg)\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_('Error modifying %(arg)s '\n ' of share %(id)s.')\n % {'arg': arg,\n 'id': share})\n raise exception.ShareBackendException(msg=exception_msg)",
"def share(self, trans, id=None, email=\"\", use_panels=False):\n msg = mtype = None\n visualization = self.get_visualization(trans, id, check_ownership=True)\n if email:\n other = trans.sa_session.query(model.User) \\\n .filter(and_(model.User.table.c.email == email,\n model.User.table.c.deleted == false())) \\\n .first()\n if not other:\n mtype = \"error\"\n msg = (\"User '%s' does not exist\" % escape(email))\n elif other == trans.get_user():\n mtype = \"error\"\n msg = (\"You cannot share a visualization with yourself\")\n elif trans.sa_session.query(model.VisualizationUserShareAssociation) \\\n .filter_by(user=other, visualization=visualization).count() > 0:\n mtype = \"error\"\n msg = (\"Visualization already shared with '%s'\" % escape(email))\n else:\n share = model.VisualizationUserShareAssociation()\n share.visualization = visualization\n share.user = other\n session = trans.sa_session\n session.add(share)\n self.create_item_slug(session, visualization)\n session.flush()\n viz_title = escape(visualization.title)\n other_email = escape(other.email)\n trans.set_message(\"Visualization '%s' shared with user '%s'\" % (viz_title, other_email))\n return trans.response.send_redirect(web.url_for(\"/visualizations/sharing?id=%s\" % id))\n return trans.fill_template(\"/ind_share_base.mako\",\n message=msg,\n messagetype=mtype,\n item=visualization,\n email=email,\n use_panels=use_panels)",
"def authenticateToSharedNotebook(self, shareKey, authenticationToken):\r\n pass",
"def share(link, emails, from_name = \"\", reply_to = \"\", body = \"\"):\r\n now = datetime.datetime.now(g.tz)\r\n ival = now - timeago(g.new_link_share_delay)\r\n date = max(now,link._date + ival)\r\n Email.handler.add_to_queue(c.user, link, emails, from_name, g.share_reply,\r\n date, request.ip, Email.Kind.SHARE,\r\n body = body, reply_to = reply_to)",
"def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None",
"def job(self, job: str):\n\n self._job = job",
"def job(self, job: str):\n\n self._job = job",
"def share_directory(self):\n # Get the user to share file/folder with.\n share_user = User.query.filter_by(email = self.email.data).first()\n if not share_user:\n return\n\n # The source to copy to another user.\n filename = os.listdir(self.path.data)[int(self.index.data)]\n src = os.path.join(self.path.data, filename)\n # Get home path for the user to share folder with.\n dst = os.path.join(share_user.get_files_path(), filename)\n # Copy source to destination.\n copytree(src, dst)",
"def test_auth_sharable_can_share(self):\n self.do_sharable(True, 'pattieblack', FakeMembership(True),\n tenant='froggy')",
"def upload_shared():\n # MARK: default copy to home dir\n put(conf.INS_ARGS['shared_folder'], '~/')"
]
| [
"0.6442974",
"0.6359261",
"0.5961956",
"0.5836367",
"0.5836367",
"0.5754939",
"0.5666393",
"0.5660614",
"0.5620277",
"0.56136906",
"0.55980754",
"0.55236447",
"0.5424858",
"0.5424858",
"0.5423828",
"0.5413373",
"0.5408935",
"0.53822124",
"0.5368407",
"0.53632206",
"0.5328321",
"0.5326472",
"0.52775466",
"0.52290785",
"0.52087533",
"0.52012557",
"0.52012557",
"0.52011454",
"0.51453346",
"0.5098986"
]
| 0.76312286 | 0 |
Stop sharing a job. Removing sharing from a user that the job is not shared with or the job owner has no effect. Attempting to unshare a job not using the default auth strategy will fail. | def unshare_job(self, job, users, context=None):
return self._client.call_method(
'UserAndJobState.unshare_job',
[job, users], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unregister_job(self, job_id):\n ujs = self.__ujs_client()\n ujs.unshare_job(job_id, [self.nar_user])",
"def stopSharingNote(self, authenticationToken, guid):\r\n pass",
"def stopSharingNote(self, authenticationToken, guid):\r\n self.send_stopSharingNote(authenticationToken, guid)\r\n self.recv_stopSharingNote()",
"def cancel_job(self, job):\n try:\n self.jobs.remove(job)\n except ValueError:\n pass",
"def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()",
"def remove(self):\n\t\tcall_sdk_function('PrlShare_Remove', self.handle)",
"def unclaim(self, job, owner):\n raise NotImplementedError()",
"def teardown_job(self, job, filesystem_only=True):\n oqp = job.oq_params\n self.teardown_upload(oqp.upload, filesystem_only=filesystem_only)\n if filesystem_only:\n return\n job.delete()\n oqp.delete()",
"def manage_removeSharedSecret(self, REQUEST):\n self._shared_secret = None\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'Shared+secret+removed.')\n )",
"def _delete_job(self, job):",
"def unshare_from_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.remove_shared(self)",
"async def job_remove(self, uid):\n self._require_running()\n job = self._get_job(uid)\n await job.close()\n del self._jobs[uid]\n del self._jobs_by_connection[job.sender.connection][uid]\n if len(self._jobs_by_connection[job.sender.connection]) == 0:\n del self._jobs_by_connection[job.sender.connection]\n self._log.debug('Removed job %s', job)",
"def stop_job(self):\n # DELETE /jobs/{job_id}/results\n pass",
"def delete_share(self, context, share, share_server=None):\n local_share_path = self._get_local_share_path(share)\n cmd = ['rm', '-rf', local_share_path]\n try:\n self._execute(*cmd, run_as_root=True)\n except exception.ProcessExecutionError:\n LOG.error(_LE('Unable to delete share %s'), share['name'])\n raise",
"def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)",
"def delete_share(self, context, share, share_server=None):\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n if not volume_uuid:\n LOG.warning(\"No volume found for \"\n \"share %(project_id)s/%(name)s\",\n {\"project_id\": share['project_id'],\n \"name\": share['name']})\n return\n\n if self.configuration.quobyte_delete_shares:\n self.rpc.call('deleteVolume', {'volume_uuid': volume_uuid})\n else:\n self.rpc.call('exportVolume', {\"volume_uuid\": volume_uuid,\n \"remove_export\": True,\n })",
"def remove_job(self, job_specifier, _unprotect=False):\n self._project.remove_job(job_specifier=job_specifier, _unprotect=_unprotect)",
"def delete_share(self, pool, project, share):\n svc = self.share_path % (pool, project, share)\n ret = self.rclient.delete(svc)\n if ret.status != restclient.Status.NO_CONTENT:\n exception_msg = (('Error deleting '\n 'share: %(share)s to '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.'),\n {'share': share,\n 'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)",
"def leave(self):\n self.remove(\n self.subreddit._reddit.config.username or self.subreddit._reddit.user.me()\n )",
"def cancel_unmute(self, id: int) -> None:\n\n self.tasks.remove_job(str(id), 'default')",
"def remove_user(self):\n self.currentuser = None\n self.carlocked = False",
"def deny_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Deny access.\")\r\n self.helper._deny_access(share['name'], access, share['share_proto'])",
"def stop(self):\n self._refresh_job.cancel()\n super().stop()",
"def delete(self, userguid, jobguid=\"\", executionparams=dict()):",
"def removeShare(self, name):\n if not q.basetype.string.check(name):\n raise TypeError('Name is not a string type')\n \n if not name in self.shares:\n raise KeyError(\"Share '%s' isn't registerd in shares\"%name)\n \n if self.shares[name].deleted:\n raise ValueError(\"Share '%s' is already removed from shares\"%name)\n \n self.shares[name].deleted = True",
"def _unshare_file(target, force=False):\n logging.debug(\"Un-sharing file %s\" % target)\n if not force and os.stat(target).st_nlink == 1:\n msg = \"File %s has ONE hard link. Un-sharing this file will delete it! Apply \\'--force\\' to do so.\" % target\n logging.error(msg)\n raise FileNotFoundError(msg)\n os.unlink(target)",
"def share_job(self, job, users, context=None):\n return self._client.call_method(\n 'UserAndJobState.share_job',\n [job, users], self._service_ver, context)",
"async def stop(self):\n self._job.cancel()\n await super().stop()",
"def stop(self):\n # Get the current future instance\n future = self.future\n\n # Cancel the job\n if future:\n future.cancel()",
"def stop_job(self,\n ssh_client,\n name,\n job_options,\n is_singularity,\n logger,\n workdir=None):\n if not self._checkSshClient(ssh_client, logger):\n return False\n\n call = self._build_job_cancellation_call(name,\n job_options,\n logger)\n if call is None:\n return False\n\n return self._execute_shell_command(ssh_client,\n call,\n workdir=workdir)"
]
| [
"0.68657285",
"0.6582266",
"0.61853975",
"0.6103804",
"0.59372395",
"0.5798973",
"0.57962316",
"0.5777436",
"0.57301474",
"0.5667062",
"0.5658682",
"0.56567",
"0.56214386",
"0.5581508",
"0.5578251",
"0.552801",
"0.5428903",
"0.5402245",
"0.5379686",
"0.5358543",
"0.53576696",
"0.5342422",
"0.5332455",
"0.53247696",
"0.5297131",
"0.52948785",
"0.52716875",
"0.5234936",
"0.52081007",
"0.517887"
]
| 0.7299188 | 0 |
Get the owner of a job. | def get_job_owner(self, job, context=None):
return self._client.call_method(
'UserAndJobState.get_job_owner',
[job], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])",
"def get_owner(self):\n return self.__owner",
"def get_owner(self, obj):\n return obj.user.username",
"def owner(self):\n answer = self._call('owner')\n return answer.owner",
"def get_owner(self):\n return self.settings.get(\"owner\", None)",
"def owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner\")",
"def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()",
"def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None",
"def getOwner(self):\n return self.__owner",
"def getOwner(self):\n return self.__owner",
"def getOwner(self):\r\n return self.owner",
"def owner(self):\n return Organization.objects.get(id=self.owner_id)",
"def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")",
"def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")",
"def owner(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"owner\")",
"def get_object(self) -> Job:\n project = ProjectPermissionsMixin.get_object(self)\n return project.jobs.get(id=self.kwargs[\"job\"])",
"def get_owner(self):\n return self._creatorsHeap[0][1]",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs[\"pk\"])",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs[\"pk\"])",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs.get(\"pk\"))",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(\n km_user__profile__pk=self.kwargs.get(\"pk\")\n )",
"def owner(self) -> str:\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")",
"def get_subscription_owner(request, profile):\n return profile.km_user.user",
"def _get_job(self, uid):\n try:\n return self._jobs[uid]\n except KeyError:\n raise JobNotFoundError('job \\'%s\\' is not found' % (uid,))"
]
| [
"0.7093864",
"0.705176",
"0.6957282",
"0.69351876",
"0.692269",
"0.6863196",
"0.68565017",
"0.68523395",
"0.68463093",
"0.68463093",
"0.682809",
"0.680491",
"0.67452174",
"0.67452174",
"0.671068",
"0.669592",
"0.6671211",
"0.666671",
"0.666671",
"0.6664125",
"0.6561198",
"0.6513149",
"0.65058917",
"0.65058917",
"0.65058917",
"0.65058917",
"0.65058917",
"0.65008",
"0.64300185",
"0.64089024"
]
| 0.8874359 | 0 |
Delete a job. Will fail if the job is not complete. Only the job owner can delete a job. | def delete_job(self, job, context=None):
return self._client.call_method(
'UserAndJobState.delete_job',
[job], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_job(self, job):\n subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DELETE_JOB, job.name])",
"def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)",
"def delete(\n address: Optional[str],\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n client.delete_job(job_id)\n cli_logger.print(f\"Job '{job_id}' deleted successfully\")",
"def delete_job(self, jobid=None, squash=None):\n\n self.check_all_jobs()\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n\n if jobid:\n if hasattr(self, 'current_job'):\n if jobid == self.current_job:\n del self.current_job\n\n if self.job_dict[jobid] in ['COMPLETED', 'ERROR',\n 'ABORTED', 'PENDING']:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), data={'follow': ''})\n\n else:\n warnings.warn(\"Can only delete a job with phase: \"\n \"'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.\")\n return\n\n if not result.ok:\n result.raise_for_status()\n if squash is None:\n warnings.warn('Deleted job: {}'.format(jobid))\n\n return result",
"def _delete_job(self, job):",
"def test_job_delete_completed_job(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.VeryShortDoubleJob\"\n job = self.client.jobs.create(test_app, class_path,\n ctx=self._get_functional_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n self.assertRaises(exceptions.NotFoundException,\n self.client.jobs.delete, job.jobId)",
"def delete(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error':'Job Not Found'}, 404)\n\n JobModel.query.filter(JobModel.job_id == job_id).delete()\n\n return custom_response({'Message': 'Deleted'}, 204)",
"def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"form\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None or job_id == \"\":\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n get_job_info = get(\n config.Config.FLASK_ENDPOINT + \"/api/scheduler/job\",\n headers={\"X-SOCA-TOKEN\": config.Config.API_ROOT_KEY},\n params={\"job_id\": job_id},\n verify=False,\n ) # nosec\n\n if get_job_info.status_code != 200:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. Job may have terminated\",\n }, 500\n else:\n job_info = get_job_info.json()[\"message\"]\n job_owner = job_info[\"Job_Owner\"].split(\"@\")[0]\n request_user = request.headers.get(\"X-SOCA-USER\")\n if request_user is None:\n return errors.all_errors(\"X-SOCA-USER_MISSING\")\n if request_user != job_owner:\n return errors.all_errors(\"CLIENT_NOT_OWNER\")\n try:\n qdel_command = config.Config.PBS_QDEL + \" \" + job_id\n try:\n delete_job = subprocess.check_output(shlex.split(qdel_command))\n return {\"success\": True, \"message\": \"Job deleted\"}\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to execute qdel command: \" + str(err),\n }, 500\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500",
"def _delete_job(self, job):\n with self.db_lock:\n return self.rcon.zrem(job)",
"def delete(self, job_id):\n # Only admin can delete any job\n if not current_user.is_admin():\n return get_message_json('删除任务需要管理员权限'), HTTPStatus.FORBIDDEN\n\n try:\n result = jobs.delete_job_by_id(job_id)\n if result == 1:\n return get_message_json('已删除该任务'), HTTPStatus.OK\n else:\n if jobs.find_job_by_id(job_id) is None:\n return get_message_json('任务不存在'), HTTPStatus.NOT_FOUND\n return get_message_json('未知的任务删除失败'), HTTPStatus.BAD_REQUEST\n except Exception as err:\n return handle_internal_error(str(err))",
"def deleteJob(self, jobId):\n params = {'id': jobId}\n try:\n return self.gc.delete(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. invalid job id:', jobId)\n return {}\n raise",
"def force_delete_job(self, token, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.force_delete_job',\n [token, job], self._service_ver, context)",
"def delete_job_final(self, job_id):\n job = self.backend.get_job(job_id)\n sure = self.yes_no_dialog(\"Are you sure you want to delete Job '{}'?\".format(job.title))\n\n if not sure:\n return\n\n self.backend.job_delete(job_id)\n self.refresh_jobs()",
"def delete_job(request, job_id):\n job = get_object_or_404(Jobs, pk=job_id)\n\n if request.user.id != job.author.id:\n messages.error(request, 'You can only delete your own job profiles')\n return redirect(reverse('view_home'))\n\n job.delete()\n messages.success(request, 'You have successfully deleted the job profile!')\n return redirect(reverse('all_jobs'))",
"def delete_job(api_instance, job_name):\n api_response = api_instance.delete_namespaced_job(\n name=job_name,\n namespace=\"default\",\n body=client.V1DeleteOptions(\n propagation_policy=\"Foreground\", grace_period_seconds=5\n ),\n )\n logger.info(\"Job deleted with status='%s'\" % str(api_response.status))",
"def test_delete_job(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job/{jobId}'.format(jobId=1),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def cmd_delete_job():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_job_by_id(id)\r\n Job.query.filter(Job.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Job '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.jobs'))\r\n else:\r\n flash(f\"Job '{id}' was not found\")\r\n return redirect(url_for('main.jobs'))",
"def job_delete(job):\n\n if os.path.exists(job.output_abspath):\n os.remove(job.output_abspath)\n db.session.delete(job)\n db.session.commit()",
"def delete_job(self, filename):\n job = Jobs.get(Jobs.filename == filename)\n job.delete_instance()",
"def delete_dlp_job(project, job_name):\n\n # Import the client library.\n import google.cloud.dlp\n\n # Instantiate a client.\n dlp = google.cloud.dlp.DlpServiceClient()\n\n # Convert the project id and job name into a full resource id.\n name = dlp.dlp_job_path(project, job_name)\n\n # Call the API to delete job.\n dlp.delete_dlp_job(name)\n\n print('Successfully deleted %s' % job_name)",
"def delete_job_by_id(self, job_id):\n try:\n self._session.query(JobEntity).\\\n filter(JobEntity.id == job_id).\\\n delete(synchronize_session=False)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True",
"def destroy(self, request, pk=None):\n try:\n job = Job.objects.get(pk=pk)\n job.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Job.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def removeJob(self):\n job, name = self.getJob() \n answer = tkMessageBox.askyesno(\"Warning\",'Remove this job?')\n if answer == False:\n return \n try: \n self.jobManager.deleteJob(job)\n except:\n print 'job not in database, removing from peat'\n del self.DB.meta.peatsa_jobs[name]\n self.DB.meta.__p__changed = 1\n self.updateJobs()\n return",
"def test_job_delete_non_existing(self):\n self.assertRaises(exceptions.NotFoundException,\n self.client.jobs.delete, 'does-not-exist')",
"def remove(self, job_or_id):\n if isinstance(job_or_id, Job):\n job = job_or_id\n else:\n job = Job(connection=self.connection, id=job_or_id)\n\n try:\n job.refresh()\n job._delete()\n except NoSuchJobError:\n pass\n\n self._remove(job.id)",
"def remove(self, job_or_id):\n job_id = job_or_id.id if isinstance(job_or_id, Job) else job_or_id\n self.connection.lrem(self.key, 0, job_id)\n return defer.succeed(job_or_id)",
"def delete_job():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_job.html\", id=id)",
"def cancel_job(self, job):\n try:\n self.jobs.remove(job)\n except ValueError:\n pass",
"def destroy(self):\n if Path(self.path).exists():\n logger.info(f\"Trying to destroy old job {self.name}\")\n try:\n shutil.rmtree(self.path)\n except PermissionError:\n logger.error(f\"Cannot destroy job {self.name}!\")\n logger.error(\"Permission error.\")\n except:\n logger.error(f\"Cannot destroy job {self.name}!\")\n logger.error(\"Unknown error.\")\n else:\n logger.info(f\"Destroyed job {self.name}\")",
"def delete_job(self, id, jobstore=None):\n\n self._scheduler.remove_job(id, jobstore)"
]
| [
"0.7845254",
"0.78164786",
"0.77987504",
"0.77536505",
"0.7749422",
"0.7680496",
"0.76609457",
"0.7616329",
"0.7511488",
"0.7430227",
"0.7350586",
"0.72614837",
"0.7252624",
"0.7240872",
"0.71866345",
"0.71839607",
"0.7164222",
"0.71450466",
"0.70545286",
"0.6984639",
"0.68680763",
"0.67357963",
"0.67344886",
"0.670923",
"0.669926",
"0.6658565",
"0.6636458",
"0.66354024",
"0.6585014",
"0.6571818"
]
| 0.78505033 | 0 |
Force delete a job - will succeed unless the job has not been started. In that case, the service must start the job and then delete it, since a job is not "owned" by any service until it is started. Only the job owner can delete a job. | def force_delete_job(self, token, job, context=None):
return self._client.call_method(
'UserAndJobState.force_delete_job',
[token, job], self._service_ver, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _delete_job(self, job):",
"def _delete_job(self, job):\n with self.db_lock:\n return self.rcon.zrem(job)",
"def delete_job(self, job):\n subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DELETE_JOB, job.name])",
"def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)",
"def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"form\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None or job_id == \"\":\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n get_job_info = get(\n config.Config.FLASK_ENDPOINT + \"/api/scheduler/job\",\n headers={\"X-SOCA-TOKEN\": config.Config.API_ROOT_KEY},\n params={\"job_id\": job_id},\n verify=False,\n ) # nosec\n\n if get_job_info.status_code != 200:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. Job may have terminated\",\n }, 500\n else:\n job_info = get_job_info.json()[\"message\"]\n job_owner = job_info[\"Job_Owner\"].split(\"@\")[0]\n request_user = request.headers.get(\"X-SOCA-USER\")\n if request_user is None:\n return errors.all_errors(\"X-SOCA-USER_MISSING\")\n if request_user != job_owner:\n return errors.all_errors(\"CLIENT_NOT_OWNER\")\n try:\n qdel_command = config.Config.PBS_QDEL + \" \" + job_id\n try:\n delete_job = subprocess.check_output(shlex.split(qdel_command))\n return {\"success\": True, \"message\": \"Job deleted\"}\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to execute qdel command: \" + str(err),\n }, 500\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500",
"def job_delete(job):\n\n if os.path.exists(job.output_abspath):\n os.remove(job.output_abspath)\n db.session.delete(job)\n db.session.commit()",
"def test_job_delete_completed_job(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.VeryShortDoubleJob\"\n job = self.client.jobs.create(test_app, class_path,\n ctx=self._get_functional_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n self.assertRaises(exceptions.NotFoundException,\n self.client.jobs.delete, job.jobId)",
"def delete(\n address: Optional[str],\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n client.delete_job(job_id)\n cli_logger.print(f\"Job '{job_id}' deleted successfully\")",
"def delete_job(api_instance, job_name):\n api_response = api_instance.delete_namespaced_job(\n name=job_name,\n namespace=\"default\",\n body=client.V1DeleteOptions(\n propagation_policy=\"Foreground\", grace_period_seconds=5\n ),\n )\n logger.info(\"Job deleted with status='%s'\" % str(api_response.status))",
"def force_delete(self):\n self.manager.force_delete(self)",
"def force_delete(self):\n self.manager.force_delete(self)",
"def delete_jobs(self):\n jobs = self.get_jobs(self.age)\n print('Jobs queued for delete: ', jobs)\n for job in jobs:\n try: \n body = k_client.V1DeleteOptions(propagation_policy='Background')\n self.kube_v1_batch_client.delete_namespaced_job(job, body=body, namespace=self.project)\n self.kube_client.delete_namespaced_persistent_volume_claim(job+\"-storage-claim\", self.project, {})\n print('Deleted job: ', job)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api -> delete_namespaced_job: %s\\n\" % e)\n exit(1)",
"def delete_job_final(self, job_id):\n job = self.backend.get_job(job_id)\n sure = self.yes_no_dialog(\"Are you sure you want to delete Job '{}'?\".format(job.title))\n\n if not sure:\n return\n\n self.backend.job_delete(job_id)\n self.refresh_jobs()",
"def delete_job(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.delete_job',\n [job], self._service_ver, context)",
"def delete(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error':'Job Not Found'}, 404)\n\n JobModel.query.filter(JobModel.job_id == job_id).delete()\n\n return custom_response({'Message': 'Deleted'}, 204)",
"def destroy(self):\n if Path(self.path).exists():\n logger.info(f\"Trying to destroy old job {self.name}\")\n try:\n shutil.rmtree(self.path)\n except PermissionError:\n logger.error(f\"Cannot destroy job {self.name}!\")\n logger.error(\"Permission error.\")\n except:\n logger.error(f\"Cannot destroy job {self.name}!\")\n logger.error(\"Unknown error.\")\n else:\n logger.info(f\"Destroyed job {self.name}\")",
"def delete_job(self, filename):\n job = Jobs.get(Jobs.filename == filename)\n job.delete_instance()",
"def delete_job(self, jobid=None, squash=None):\n\n self.check_all_jobs()\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n\n if jobid:\n if hasattr(self, 'current_job'):\n if jobid == self.current_job:\n del self.current_job\n\n if self.job_dict[jobid] in ['COMPLETED', 'ERROR',\n 'ABORTED', 'PENDING']:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), data={'follow': ''})\n\n else:\n warnings.warn(\"Can only delete a job with phase: \"\n \"'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.\")\n return\n\n if not result.ok:\n result.raise_for_status()\n if squash is None:\n warnings.warn('Deleted job: {}'.format(jobid))\n\n return result",
"def remove_job(job_id):\n subprocess.check_call(['atrm', str(job_id)])\n return job_id",
"def delete(self, force='False'):\n return self.manager.delete(self, force)",
"def removeJob(self):\n job, name = self.getJob() \n answer = tkMessageBox.askyesno(\"Warning\",'Remove this job?')\n if answer == False:\n return \n try: \n self.jobManager.deleteJob(job)\n except:\n print 'job not in database, removing from peat'\n del self.DB.meta.peatsa_jobs[name]\n self.DB.meta.__p__changed = 1\n self.updateJobs()\n return",
"def cmd_delete_job():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_job_by_id(id)\r\n Job.query.filter(Job.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Job '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.jobs'))\r\n else:\r\n flash(f\"Job '{id}' was not found\")\r\n return redirect(url_for('main.jobs'))",
"def __delete_job_status(self, job: Job):\n\n keys = self._get_keys(f'jobstatus:{job.id}:*')\n for key in keys:\n self.redis_client.delete(key)",
"async def delete(self, job):\n # nothing to delete if it doesn't exist\n info = await self.middleware.call('gluster.volume.exists_and_started', CTDB_VOL_NAME)\n if not info['exists']:\n return\n\n # stop the gluster volume\n if info['started']:\n options = {'args': (CTDB_VOL_NAME,), 'kwargs': {'force': True}}\n job.set_progress(33, f'Stopping gluster volume {CTDB_VOL_NAME!r}')\n await self.middleware.call('gluster.method.run', volume.stop, options)\n\n # finally, we delete it\n job.set_progress(66, f'Deleting gluster volume {CTDB_VOL_NAME!r}')\n await self.middleware.call('gluster.method.run', volume.delete, {'args': (CTDB_VOL_NAME,)})\n job.set_progress(100, f'Successfully deleted {CTDB_VOL_NAME!r}')",
"def delete_job(self, id, jobstore=None):\n\n self._scheduler.remove_job(id, jobstore)",
"def test_delete_job(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job/{jobId}'.format(jobId=1),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"async def job_remove(self, uid):\n self._require_running()\n job = self._get_job(uid)\n await job.close()\n del self._jobs[uid]\n del self._jobs_by_connection[job.sender.connection][uid]\n if len(self._jobs_by_connection[job.sender.connection]) == 0:\n del self._jobs_by_connection[job.sender.connection]\n self._log.debug('Removed job %s', job)",
"def cancel_job(self, job):\n try:\n self.jobs.remove(job)\n except ValueError:\n pass",
"def delete_job():\n fsurfer.log.initialize_logging()\n logger = fsurfer.log.get_logger()\n\n parser = argparse.ArgumentParser(description=\"Process and remove old results\")\n # version info\n parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)\n # Arguments for action\n parser.add_argument('--dry-run', dest='dry_run',\n action='store_true', default=False,\n help='Mock actions instead of carrying them out')\n parser.add_argument('--debug', dest='debug',\n action='store_true', default=False,\n help='Output debug messages')\n\n args = parser.parse_args(sys.argv[1:])\n if args.debug:\n fsurfer.log.set_debugging()\n if args.dry_run:\n sys.stdout.write(\"Doing a dry run, no changes will be made\\n\")\n\n conn = fsurfer.helpers.get_db_client()\n cursor = conn.cursor()\n job_query = \"SELECT jobs.id, \" \\\n \" jobs.username, \" \\\n \" jobs.state, \" \\\n \" job_run.pegasus_ts, \" \\\n \" jobs.subject \" \\\n \"FROM freesurfer_interface.jobs AS jobs, \" \\\n \" freesurfer_interface.job_run AS job_run \" \\\n \"WHERE jobs.state = 'DELETE PENDING' AND \" \\\n \" jobs.id = job_run.job_id\"\n job_update = \"UPDATE freesurfer_interface.jobs \" \\\n \"SET state = 'DELETED' \" \\\n \"WHERE id = %s;\"\n try:\n cursor.execute(job_query)\n for row in cursor.fetchall():\n workflow_id = row[0]\n username = row[1]\n logger.info(\"Deleting workflow {0} for user {1}\".format(workflow_id,\n username))\n # pegasus_ts is stored as datetime in the database, convert it to what we have on the fs\n pegasus_ts = row[3]\n\n if pegasus_ts is None:\n # not submitted yet\n logger.info(\"Workflow {0} not \".format(workflow_id) +\n \"submitted, updating\")\n cursor.execute(job_update, [workflow_id])\n if args.dry_run:\n conn.rollback()\n else:\n conn.commit()\n continue\n\n workflow_dir = os.path.join(fsurfer.FREESURFER_SCRATCH,\n username,\n 'workflows',\n 'fsurf',\n 'pegasus',\n 'freesurfer',\n pegasus_ts)\n result_dir = os.path.join(fsurfer.FREESURFER_BASE,\n username,\n 'workflows',\n 'output',\n 'fsurf',\n 'pegasus',\n 'freesurfer',\n pegasus_ts)\n if args.dry_run:\n sys.stdout.write(\"Would run pegasus-remove \"\n \"{0}\\n\".format(result_dir))\n else:\n try:\n output = subprocess.check_output(['/usr/bin/pegasus-remove',\n workflow_dir],\n stderr=subprocess.STDOUT)\n exit_code = 0\n except subprocess.CalledProcessError as err:\n exit_code = err.returncode\n output = err.output\n # job removed (code = 0) just now or it's been removed earlier\n if exit_code == 0 or 'not found' in output:\n # look for condor job id and wait a bit for pegasus to remove it\n # so that we can delete the pegasus directories\n job_id = re.match(r'Job (\\d+.\\d+) marked for removal', output)\n if job_id is not None:\n logger.info(\"Waiting for running jobs to be removed...\\n\")\n count = 0\n while True:\n time.sleep(10)\n try:\n output = subprocess.check_output([\"/usr/bin/condor_q\",\n job_id.group(1)])\n except subprocess.CalledProcessError:\n logger.exception(\"An error occurred while \"\n \"checking for running \"\n \"jobs, exiting...\\n\")\n break\n if 'pegasus-dagman' not in output:\n break\n count += 1\n if count > 30:\n logger.error(\"Can't remove job, exiting...\\n\")\n break\n else:\n logger.error(\"Got error while removing workflow, \"\n \"exitcode: {0} error: {1}\".format(exit_code, output))\n logger.info(\"Jobs removed, removing workflow directory\\n\")\n try:\n if not args.dry_run and os.path.exists(workflow_dir):\n shutil.rmtree(workflow_dir)\n except shutil.Error:\n logger.exception(\"Can't 
remove directory at \"\n \"{0}, exiting...\\n\".format(workflow_dir))\n\n deletion_list = []\n # add input file\n input_files = get_input_files(workflow_id)\n if input_files is None:\n logger.error(\"Can't find input files for \" +\n \"workflow {0}\".format(workflow_id))\n else:\n deletion_list.extend(input_files)\n # remove files in result dir\n if os.path.isdir(result_dir):\n for entry in os.listdir(result_dir):\n deletion_list.append(os.path.join(result_dir, entry))\n if os.path.exists(result_dir):\n deletion_list.append(result_dir)\n # delete output and log copied over after workflow completion\n # if present\n deletion_list.append(os.path.join(fsurfer.FREESURFER_BASE,\n username,\n 'results',\n 'recon_all-{0}.log'.format(workflow_id)))\n deletion_list.append(os.path.join(fsurfer.FREESURFER_BASE,\n username,\n 'results',\n \"{0}_{1}_output.tar.bz2\".format(workflow_id,\n row[4])))\n for entry in deletion_list:\n if args.dry_run:\n sys.stdout.write(\"Would delete {0}\\n\".format(entry))\n else:\n logger.info(\"Removing {0}\".format(entry))\n if not purge_workflow_file(entry):\n logger.error(\"Can't remove {0} for job {1}\".format(entry,\n workflow_id))\n logger.info(\"Setting workflow {0} to DELETED\".format(workflow_id))\n cursor.execute(job_update, [workflow_id])\n if args.dry_run:\n conn.rollback()\n else:\n conn.commit()\n except psycopg2.Error as e:\n logger.exception(\"Error: {0}\".format(e))\n return 1\n finally:\n conn.commit()\n conn.close()\n\n retcode = delete_incomplete_jobs()\n return retcode",
"def deleteTask():\n\tmarkOff(isdelete = 1)"
]
| [
"0.74902743",
"0.67161715",
"0.6704146",
"0.6626484",
"0.6573256",
"0.6544934",
"0.6502887",
"0.6476197",
"0.6469996",
"0.64464337",
"0.64464337",
"0.6444198",
"0.642586",
"0.6412675",
"0.6338228",
"0.63190943",
"0.6295222",
"0.6279184",
"0.60903525",
"0.602891",
"0.6018014",
"0.6013906",
"0.59924954",
"0.5985098",
"0.5942512",
"0.59421515",
"0.5927455",
"0.5911734",
"0.590154",
"0.580756"
]
| 0.77460086 | 0 |
Returns a UserConfig object from an environment variable. | def load_from_env_var(cls, env_var):
return cls.load_from_filename(os.environ.get(env_var)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_env():\n app_env = os.getenv(\"APP_ENV\", \"TEST\")\n return ConfigurationFactory.get_config(app_env)",
"def config(env=DEFAULT_ENV, default=None, engine=None):\r\n\r\n config = {}\r\n\r\n s = os.environ.get(env, default)\r\n\r\n if s:\r\n config = parse(s, engine)\r\n\r\n return config",
"def parse_env(env_file, env=None):\n parser = configparser.ConfigParser()\n parser.read_file(env_file)\n\n if env is None:\n if not parser.has_section('pblog'):\n raise EnvError(\"pblog section was not found\")\n env = parser['pblog']['env']\n\n env_section = 'pblog:%s' % env\n if not parser.has_section(env_section):\n raise EnvError(\"Environment %s not defined\" % env_section)\n\n return Environment(\n name=env,\n url=parser[env_section]['url'].rstrip('/'),\n username=parser[env_section]['username'],\n local_app_module=parser[env_section].get('wsgi'))",
"def balaio_config_from_env():\n try:\n filepath = os.environ['BALAIO_SETTINGS_FILE']\n except KeyError:\n raise ValueError('missing env variable BALAIO_SETTINGS_FILE')\n\n return Configuration.from_file(filepath)",
"def alembic_config_from_env():\n try:\n filepath = os.environ['BALAIO_ALEMBIC_SETTINGS_FILE']\n except KeyError:\n raise ValueError('missing env variable BALAIO_ALEMBIC_SETTINGS_FILE')\n\n return Configuration.from_file(filepath)",
"def get_envvar_configuration(app_name, load_as_json=True):\n configuration = {}\n prefix = '{0}_'.format(app_name.upper())\n for key, value in os.environ.items():\n if key.startswith(prefix):\n config_name = key[len(prefix):]\n configuration[config_name] = value\n if load_as_json:\n return _read_as_json(configuration)\n return configuration",
"def from_envvar(self, variable_name, silent=False):\n\t\trv = os.environ.get(variable_name)\n\t\tif not rv:\n\t\t\tif silent:\n\t\t\t\treturn False\n\t\t\traise RuntimeError('The environment variable %r is not set '\n\t\t\t\t\t\t\t 'and as such configuration could not be '\n\t\t\t\t\t\t\t 'loaded. Set this variable and make it '\n\t\t\t\t\t\t\t 'point to a configuration file' %\n\t\t\t\t\t\t\t variable_name)\n\t\treturn self.from_pyfile(rv, silent=silent)",
"def get_env(config_uri):\n env = paster.bootstrap(config_uri)\n return env",
"def config(env=DEFAULT_ENV, default=None, engine=None, conn_max_age=0, ssl_require=False):\n\n config = {}\n\n s = os.environ.get(env, default)\n\n if s:\n config = parse(s, engine, conn_max_age, ssl_require)\n\n return config",
"def read(cls, envvar=\"CONFIG_FILE\", filename=\"config.json\"):\n filename = os.environ.get(envvar, filename)\n directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n filename = directory + \"/\" + filename\n try:\n with open(filename, \"r\") as config_file:\n config = json.loads(config_file.read())\n except FileNotFoundError:\n config = {}\n\n return cls(config)",
"def load_user_config(configpath):\n configpath = os.path.normpath(os.path.expanduser(configpath))\n log.info('trying to import config from %s...' % configpath)\n try:\n fp, pathname, description = imp.find_module('config', [configpath])\n except ImportError:\n log.warning('%s/config.py not found - using default config' % configpath)\n return None\n mod = imp.load_module('config', fp, pathname, description)\n return getattr(mod, 'config')",
"def get_user_config(project_path, use_cache=True):\n\n if sys.platform == 'win32':\n user_config = os.path.expanduser(r'~\\\\tidypy')\n else:\n user_config = os.path.join(\n os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'),\n 'tidypy'\n )\n\n return get_specific_config(user_config, project_path, use_cache=use_cache)",
"def configure_environment(ctx, user):\n try:\n out = ctx.obj.configuration(\n user\n )\n print_message(out[\"path\"])\n except BaseException as e:\n print_error(e.message)",
"def get_environment_var(env_name, default_value):\n if env_name in os.environ:\n return os.environ[env_name]\n else:\n return default_value",
"def account_from_env(cls):\n return os.getenv(\"OIO_ACCOUNT\", \"myaccount\")",
"def __load_config(runtime_env):\n config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"config.ini\")\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n _app_config = configparser.ConfigParser()\n _app_config.read(config_file)\n\n # Evaluate\n _app_config = _app_config[runtime_env]\n return _app_config",
"def get_user():\n return os.getenv(\"USER\")",
"def fromenv(cls) -> 'Config':\n files = Config.find_config_files()\n if not files:\n log.info(\n \"Could not find default config: `~/.wpwatcher/wpwatcher.conf`, `~/wpwatcher.conf` or `./wpwatcher.conf`\"\n )\n return cls.default()\n else:\n return cls.fromfiles(files)",
"def get_config(path):\n if configparser is None:\n return None\n\n # Check for env specific configs first\n if os.path.exists(os.path.join(ROOT, 'config', NAME, path)):\n path = os.path.join(ROOT, 'config', NAME, path)\n else:\n path = os.path.join(ROOT, 'config', path)\n\n if not os.path.isfile(path):\n return None\n\n conf = open(path, 'rt').read()\n conf = os.path.expandvars(conf)\n\n config = configparser.SafeConfigParser()\n if sys.version_info[0] == 2:\n from io import StringIO\n config.readfp(StringIO(unicode(conf)))\n else:\n config.read_string(conf)\n return config",
"def get_config() -> Config:\n app_config = os.environ.get('APP_CONFIG', 'ProductionConfig')\n config_module = importlib.import_module(\n '.'.join(\n f\"magma.fluentd_client.config.{app_config}\".split('.')[\n :-1\n ],\n ),\n )\n config_class = getattr(config_module, app_config.split('.')[-1])\n return config_class()",
"def env(var):\n return os.environ[var]",
"def get_user_config_path():\n\n return \"{}/.dapsenv/dapsenv.conf\".format(expanduser(\"~\"))",
"def get_jenkins_env_variable_from_credentials(env_var_dot_path, require_jenkins_user=True):\n if require_jenkins_user and subprocess.check_output([\"whoami\"]) != 'jenkins':\n raise ValueError(\"Only Jenkins user can retrieve this value\")\n\n DM_CREDENTIALS_REPO = os.environ.get('DM_CREDENTIALS_REPO', '../digitalmarketplace-credentials')\n creds_json = _decrypt_yaml_file_with_sops(DM_CREDENTIALS_REPO, 'jenkins-vars/jenkins.yaml')\n\n return _get_nested_value(creds_json, env_var_dot_path.split('.'))",
"def load_config():\n config_file = os.path.join(\n Path(os.path.dirname(os.path.realpath(__file__))).parent,\n \"config.ini\"\n )\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n app_config = configparser.ConfigParser()\n app_config.read(config_file)\n return app_config['uberoo']",
"def get_username_from_env():\n username = None\n try:\n username = os.environ['JIRA_USERNAME']\n except KeyError:\n vprint(\"No user name found in JIRA_USERNAME environment variable\")\n\n return username",
"def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_setting(key):\n try:\n from main import flask_app\n return flask_app.config[key]\n except:\n environment = get_environment()\n #Load settings from the corresponding class\n if environment == Config.ENV_PRODUCTION:\n obj = ProductionConfig()\n else:\n obj = TestingConfig()\n return getattr(obj, key)",
"def env(key, default=None, required=False):\n try:\n value = os.environ[key]\n return ast.literal_eval(value)\n except (SyntaxError, ValueError):\n return value\n except KeyError:\n if default or not required:\n return default\n raise ImproperlyConfigured(\n \"Missing required environment variable '%s'\" % key)",
"def from_envvar(self, variable_name, silent=False):\n\t\tpaths = os.environ.get(variable_name)\n\t\tif not paths:\n\t\t\tif silent:\n\t\t\t\treturn False\n\t\t\traise RuntimeError('The environment variable %r is not set '\n\t\t\t\t\t'and as such configuration could not be '\n\t\t\t\t\t'loaded. Set this variable and make it '\n\t\t\t\t\t'point to a configuration file' %\n\t\t\t\t\tvariable_name\n\t\t\t\t)\n\t\trv = False\n\t\tfor path in paths.split(os.pathsep):\n\t\t\tpath = path.strip()\n\t\t\tif path:\n\t\t\t\trv = self.from_pyfile(path, silent=silent)\n\t\treturn rv",
"def get_env_setting(setting):\r\n try:\r\n return environ[setting]\r\n except KeyError:\r\n error_msg = \"Set the %s env variable\" % setting\r\n raise ImproperlyConfigured(error_msg)"
]
| [
"0.6596632",
"0.6080266",
"0.60578686",
"0.60304",
"0.5918862",
"0.58581537",
"0.58541805",
"0.57985073",
"0.5791923",
"0.5786983",
"0.5778541",
"0.56761557",
"0.56284994",
"0.562235",
"0.56174785",
"0.5572147",
"0.55627424",
"0.5561496",
"0.55554324",
"0.5537923",
"0.5531441",
"0.5529112",
"0.54549605",
"0.54466736",
"0.5443057",
"0.542941",
"0.54258096",
"0.54159445",
"0.5406234",
"0.5394923"
]
| 0.62398374 | 1 |
Parse some extra information out of the bibtex database so that we can include it in the webpage. | def extra_bibparse(db):
for key,entry in db.entries.items():
for auth in entry.persons["author"]:
if ("Harrison" not in auth.first_names or
"Chapman" not in auth.last_names):
entry.add_person(auth, "otherauthor") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_core_biblio(self, bib):\n try:\n pubnumber = bib_scalar(bib, 'pubnumber')\n pubdate = datetime.strptime(bib_scalar(bib, 'pubdate'), '%Y%m%d')\n fam_raw = bib_scalar(bib, 'family_id')\n family_id = int(fam_raw) if fam_raw != None else fam_raw\n assign_applic_raw = bib.get('assign_applic')\n assign_applic = '|'.join(assign_applic_raw) if len(assign_applic_raw) > 0 else \"\"\n except KeyError, exc:\n raise RuntimeError(\"Document is missing mandatory biblio field (KeyError: {})\".format(exc))\n if len(pubnumber) == 0:\n raise RuntimeError(\"Document publication number field is empty\")\n\n return family_id, pubdate, pubnumber, assign_applic",
"def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. (%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail",
"def parse_bibtex(self, data: str) -> Dict:\n\n new_bib = [line for line in data.splitlines() if \"= ,\" not in line]\n new_bib = \"\\n\".join(new_bib)\n bib_db: bibtexparser.bibdatabase.BibDatabase = bibtexparser.loads(new_bib)\n result = dict()\n for entry in bib_db.entries:\n osti_id = entry[\"ID\"].split(\"_\")[1]\n result[osti_id] = entry\n return result",
"def _extract_detailed_biblio(self, bib, doc_id, new_classes, new_titles, pubnumber):\n if self.load_titles:\n\n try:\n title_languages = bib['title_lang']\n title_strings = bib['title']\n\n unique_titles = dict()\n for title_lang, title in zip(title_languages, title_strings):\n if title_lang in unique_titles:\n if len(title) < 15:\n continue\n title = min(title, unique_titles[title_lang][2])\n unique_titles[title_lang] = (doc_id, title_lang, title )\n\n new_titles.extend(unique_titles.values())\n\n except KeyError:\n logger.warn(\n \"KeyError detected when processing titles for {}; title language or text data may be missing\".format(\n pubnumber))\n if self.load_classifications:\n\n for system_key in ('ipc', 'ecla', 'ipcr', 'cpc'):\n try:\n for classif in bib[system_key]:\n new_classes.append((doc_id, classif, DocumentClass.bib_dict[system_key] ))\n except KeyError:\n logger.warn(\"Document {} is missing {} classification data\".format(pubnumber, system_key))",
"def parsed2Bibtex(parsed):\n bibtex = '@'+parsed['ENTRYTYPE']+'{'+parsed['ID']+\",\\n\"\n\n for field in [i for i in sorted(parsed) if i not in ['ENTRYTYPE', 'ID']]:\n bibtex += \"\\t\"+field+\"={\"+parsed[field]+\"},\\n\"\n bibtex += \"}\\n\\n\"\n return bibtex",
"def _parse_biography(self):\n data = {}\n self.right_column = self.content.find(\"div\", class_=\"rechteSpalte60\")\n heading = self.right_column.find(\"h3\")\n # The page of the second president hides the details information\n # and displays a biography instead. By selecting the second div,\n # we get the hidden div containing the MPs details.\n if not heading:\n self.right_column = self.content.find_all(\"div\", class_=\"rechteSpalte60\")[1]\n data.update(self._parse_dob_job())\n data.update(self._parse_political_mandates())\n data.update(self._parse_political_posts())\n data.update(self._parse_work_history())\n data.update(self._parse_education())\n return data",
"def meta_extract(doc):\n title_search = re.compile(r'(title:\\s*)(?P<title>.*(\\n *\\w.*)*)(\\nauthor:)', re.IGNORECASE)\n author_search = re.compile(r'(author:)(?P<author>.*)', re.IGNORECASE)\n translator_search = re.compile(r'(translator:)(?P<translator>.*)', re.IGNORECASE)\n illustrator_search = re.compile(r'(illustrator:)(?P<illustrator>.*)', re.IGNORECASE)\n title = re.search(title_search, doc).group('title')\n author = re.search(author_search, doc)\n translator = re.search(translator_search, doc)\n illustrator = re.search(illustrator_search, doc)\n if author: \n author = author.group('author')\n if translator:\n translator = translator.group('translator')\n if illustrator:\n illustrator = illustrator.group('illustrator')\n print \"Title: {}\".format(title)\n print \"Author(s): {}\".format(author)\n print \"Translator(s): {}\".format(translator)\n print \"Illustrator(s): {}\\n\".format(illustrator)\n # return title, author, illustrator, translator",
"def parse_article_info(\n pubmed_article, year_info_only, nlm_category, author_list, reference_list, parse_subs=False\n):\n medline = pubmed_article.find(\"MedlineCitation\")\n article = medline.find(\"Article\")\n\n if article.find(\"ArticleTitle\") is not None:\n title = stringify_children(article.find(\"ArticleTitle\")).strip() or \"\"\n else:\n title = \"\"\n\n if article.find(\"Journal/JournalIssue/Volume\") is not None:\n volume = article.find(\"Journal/JournalIssue/Volume\").text or \"\"\n else:\n volume = \"\"\n\n if article.find(\"Language\") is not None:\n languages = \";\".join([language.text for language in article.findall(\"Language\")])\n else:\n languages = \"\"\n\n if article.find(\"VernacularTitle\") is not None:\n vernacular_title = stringify_children(article.find(\"VernacularTitle\")).strip() or \"\"\n else:\n vernacular_title = \"\"\n\n if article.find(\"Journal/JournalIssue/Issue\") is not None:\n issue = article.find(\"Journal/JournalIssue/Issue\").text or \"\"\n else:\n issue = \"\"\n\n if volume == \"\":\n issue = \"\"\n else:\n issue = f\"{volume}({issue})\"\n\n if article.find(\"Pagination/MedlinePgn\") is not None:\n pages = article.find(\"Pagination/MedlinePgn\").text or \"\"\n else:\n pages = \"\"\n\n category = \"NlmCategory\" if nlm_category else \"Label\"\n if article.find(\"Abstract/AbstractText\") is not None:\n # parsing structured abstract\n if len(article.findall(\"Abstract/AbstractText\")) > 1:\n abstract_list = list()\n for abstract in article.findall(\"Abstract/AbstractText\"):\n section = abstract.attrib.get(category, \"\")\n if section != \"UNASSIGNED\":\n abstract_list.append(\"\\n\")\n abstract_list.append(abstract.attrib.get(category, \"\"))\n section_text = stringify_children(abstract).strip()\n abstract_list.append(section_text)\n abstract = \"\\n\".join(abstract_list).strip()\n else:\n abstract = (\n stringify_children(article.find(\"Abstract/AbstractText\")).strip() or \"\"\n )\n elif article.find(\"Abstract\") is not None:\n abstract = stringify_children(article.find(\"Abstract\")).strip() or \"\"\n else:\n abstract = \"\"\n\n authors_dict = parse_author_affiliation(medline)\n if not author_list:\n affiliations = \";\".join(\n [\n author.get(\"affiliation\", \"\")\n for author in authors_dict\n if author.get(\"affiliation\", \"\") != \"\"\n ]\n )\n authors = \";\".join(\n [\n author.get(\"lastname\", \"\") + \"|\" + author.get(\"forename\", \"\") + \"|\" +\n author.get(\"initials\", \"\") + \"|\" + author.get(\"identifier\", \"\")\n for author in authors_dict\n ]\n )\n else:\n authors = authors_dict\n journal = article.find(\"Journal\")\n journal_name = \" \".join(journal.xpath(\"Title/text()\"))\n\n pmid = parse_pmid(pubmed_article)\n doi = parse_doi(pubmed_article)\n references = parse_references(pubmed_article, reference_list)\n pubdate = date_extractor(journal, year_info_only)\n mesh_terms = parse_mesh_terms(medline, parse_subs=parse_subs)\n publication_types = parse_publication_types(medline)\n chemical_list = parse_chemical_list(medline)\n keywords = parse_keywords(medline)\n other_id_dict = parse_other_id(medline)\n journal_info_dict = parse_journal_info(medline)\n dict_out = {\n \"title\": title,\n \"issue\": issue,\n \"pages\": pages,\n \"abstract\": abstract,\n \"journal\": journal_name,\n \"authors\": authors,\n \"pubdate\": pubdate,\n \"pmid\": pmid,\n \"mesh_terms\": mesh_terms,\n \"publication_types\": publication_types,\n \"chemical_list\": chemical_list,\n \"keywords\": keywords,\n \"doi\": doi,\n \"references\": 
references,\n \"delete\": False,\n \"languages\": languages,\n \"vernacular_title\": vernacular_title\n }\n if not author_list:\n dict_out.update({\"affiliations\": affiliations})\n dict_out.update(other_id_dict)\n dict_out.update(journal_info_dict)\n return dict_out",
"def _parse_email_bib_data(cls, bib_data):\n pub_title = None\n pub_year = None\n\n # Get author name(s)\n parsed_bib = bib_data.split(\" - \")\n m_author = _utils.clean_string(parsed_bib[0])\n\n # Split further to get year and author\n if len(parsed_bib) > 1:\n parsed_b2 = parsed_bib[1].split(',')\n\n if len(parsed_b2) == 2:\n pub_title = _utils.clean_string(parsed_b2[0])\n try:\n pub_year = int(_utils.clean_string(parsed_b2[1]))\n except ValueError:\n pub_year = None\n\n else:\n try:\n pub_year = int(_utils.clean_string(parsed_b2[0]))\n except ValueError:\n pub_year = None\n pub_title = _utils.clean_string(parsed_b2[0])\n\n return {\n 'author': m_author,\n 'pub_title': pub_title,\n 'pub_year': pub_year\n }",
"def info(entry: BibItem) -> str:\n return \"{title}{author}{date}\".format(\n title=(\n \"Title: {}\\n\".format(re.sub(r\"[}{]\", \"\", entry[\"title\"]))\n if \"title\" in entry\n else \"\"\n ),\n author=(\n \"Author{plural}: {author}\\n\".format(\n plural=\"s\" if len(entry[\"author\"]) > 1 else \"\",\n author=\"; \".join(entry[\"author\"]),\n )\n if \"author\" in entry\n else \"\"\n ),\n date=(\n \"Year: {}\\n\".format(entry[\"date\"].split(\n \"-\")[0]) if \"date\" in entry else \"\"\n ),\n )",
"def __init__(self):\n self.bib_database = BibDatabase()\n #: Callback function to process BibTeX entries after parsing, for example to create a list from a string with\n #: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.\n self.customization = None\n\n #: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.\n self.ignore_nonstandard_types = True\n\n #: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to\n #: lowercase names. Default: `True`.\n self.homogenise_fields = True\n\n # On some sample data files, the character encoding detection simply\n # hangs We are going to default to utf8, and mandate it.\n self.encoding = 'utf8'\n\n # pre-defined set of key changes\n self.alt_dict = {\n 'keyw': 'keyword',\n 'keywords': 'keyword',\n 'authors': 'author',\n 'editors': 'editor',\n 'url': 'link',\n 'urls': 'link',\n 'links': 'link',\n 'subjects': 'subject'\n }\n\n self.replace_all_re = re.compile(r'((?P<pre>\"?)\\s*(#|^)\\s*(?P<id>[^\\d\\W]\\w*)\\s*(#|$)\\s*(?P<post>\"?))', re.UNICODE)",
"def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ",
"def extractInfo(Link):\r\n response = urlopen(Link)\r\n html = response.read()\r\n #LinkInfo = ds.Links()\r\n #html = refinehtmltags(html)\r\n pagetitle = html[html.find('<title>') + 7 : html.find('</title>')]\r\n startindex = html.find('<meta name=\"description\" content=\"')\r\n desc = html[startindex + 34 : html.find('\"',startindex + 38)]\r\n print pagetitle\r\n print desc\r\n #### Use the links to\r\n #### Extract the information as\r\n #### pagetitle\r\n #### description\r\n #return LinkInfo\r",
"def parse_bib_from_bibtex(filename):\n\tentry_regex = r'''\n\t\t\t(?msx) # flags: multi-line, dot-match-all, verbose\n\t\t\t^@\\w+\\{ # start of line, item type\n\t\t\t.*? # content, can span multiple lines, non-greedy\n\t\t\t^\\} # start of line, closing parens\n\t\t\t'''\n\tattr_regex = r'''\n\t\t\t(?ix) # flags: ignore-case, verbose\n\t\t\t\\s* # leading space\n\t\t\t%s # what to match: author, title, or year\n\t\t\t\\s*=\\s* # space and equals\n\t\t\t[{\\\"'] # opening parens\n\t\t\t(.*) # the actual author, title, or year\n\t\t\t[}\\\"'] # closing parens or quote\n\t\t\t\\,? # optional comma\n\t\t\t'''\n\tparse_func = make_parse_func(attr_regex % \"author\", attr_regex % \"title\", \n\t attr_regex % \"year\", \" and \")\n\treturn parse_bib(filename, entry_regex, parse_func)",
"def _process_journal_info(el):\n assert len(el) == 1 # We only expect one journal record per article\n el = el[0]\n\n title = el.xpath('string(Title)')\n abbreviation = el.xpath('string(ISOAbbreviation)')\n # Some journals will have an NlmUniqueID and no ISSN. In every case where\n # an NlmUniqueID is present it will be used instead of the ISSN\n issn = el.xpath('string(ISSN)') or None\n # Date parsing should be interesting - there's no guarantee on what\n # fields will be present so I may need to do some ugly stuff\n pub_date_el = el.xpath('JournalIssue/PubDate')[0]\n date_elements = set([c.tag for c in pub_date_el.getchildren()])\n\n final_pub_date = {'Year': 1900, 'Month': 1, 'Day': 1}\n if 'MedlineDate' in date_elements:\n date_text = pub_date_el.xpath('string(MedlineDate)')\n year = re.search(YEAR_RE_PATTERN, date_text)\n if year is None:\n # XXX Give up and debug this later. This condition didn't\n # appear in the test data but it could come up when\n # parsing real data\n pass\n else:\n year = year.group(0)\n # Maybe we can guess a month, too\n month = 1\n for k in DATE_TRANSLATIONS.keys():\n # I can't tell if short names are preferred\n # to long names but this shouldn't matter in the end\n if k in date_text:\n month = DATE_TRANSLATIONS[k]\n break\n # Let's leave the date alone\n final_pub_date.update(Year=year, Month=month)\n else:\n pub_date = {}\n # Need to build up a date with as much information as we've got\n for k in date_elements.intersection(('Year', 'Month', 'Day')):\n v = pub_date_el.xpath(\"string(%s)\" % k)\n if v in DATE_TRANSLATIONS:\n v = DATE_TRANSLATIONS[v]\n pub_date[k] = int(v)\n final_pub_date.update(pub_date)\n\n return {'title': title,\n 'abbreviation': abbreviation,\n 'id': issn,\n 'id_type': 'issn',\n 'pub_date': final_pub_date,\n }",
"def process_article(self, title, text, template = 'Infobox person'):\n \n # Create a parsing object\n wikicode = mwparserfromhell.parse(text)\n \n # Search through templates for the template\n matches = wikicode.filter_templates(matches = template)\n raw_year_string = 'EMPTY'\n birth_year = 'EMPTY'\n infobox = ''\n if len(matches) >= 1:\n # Extract information from infobox\n for match in matches:\n infobox = str(match)\n for param in match.params:\n if param.name.strip_code().strip() == 'birth_date':\n raw_year_string = str(param.value)\n birth_year = self.get_birth_year(raw_year_string)\n summary = self.get_summary(wikicode.strip_code().strip())\n return (title, birth_year, summary, raw_year_string, infobox)",
"def html_reader(input_dir):\r\n #read data from the html file\r\n with open(input_dir,'r') as html_file:\r\n content = html_file.read()\r\n content = (content.split('\\n'))[4:-4]\r\n num = re.compile(\"(.*\\t\\d.*)|(\\d*\\d\\.\\d*)\")\r\n information = []\r\n for i in range(len(content)):\r\n if num.match(content[i])==None:\r\n information.append(content[i])\r\n information = information[:-1]\r\n #data parsing\r\n Date = re.compile('( ?CACM|June)')\r\n Meta = re.compile(\"(CA\\d\\d\\d\\d\\d\\d|June)\")\r\n #get date and meta index\r\n for i in range(len(information)):\r\n if Date.match(information[i])!=None:\r\n index_date = i\r\n if Meta.match(information[i])!=None:\r\n index_meta =i\r\n content = information[:index_date]\r\n others = information[index_date+2:index_meta]\r\n for i in range(len(content)):\r\n if content[i]==\"\":\r\n title = content[:i]\r\n abstract = content[i+1:]\r\n break\r\n #get author and other\r\n author = []\r\n other = []\r\n for i in range(len(others)):\r\n if others[i]==\"\":\r\n if re.match(\"[A-Z].*, ?[A-Z].*\\..*\",others[0]) != None:\r\n author = others[:i]\r\n other = others[i+1:]\r\n else:\r\n other = others\r\n break\r\n for i in range(len(author)):\r\n if re.match(\"[A-Z].*, ?[A-Z].*\\..*\",author[i]) != None:\r\n name = author[i].split(\",\")\r\n author[i] = (name[1]+name[0])\r\n author[i] = author[i].replace(\" \",\"\")\r\n author[i] = author[i].replace(\"\\t\",\"\")\r\n author[i] = author[i].lower()\r\n\r\n #parse date\r\n date = []\r\n date.append(re.search(\"19\\d\\d\", information[index_date]).group())\r\n date.append(re.search(\"(January|February|March|April|May|June|JUly|July|August|September|October|November|December)\",information[index_date]).group().lower())\r\n\r\n #parse meta data\r\n meta = []\r\n meta.append(re.search(\"CA\\d\\d\\d\\d\\d\\d\\w?\",information[index_meta]).group().lower())#0\r\n meta.append(re.search(\"[a-z0-9] [A-Z]{2}[A-Z]?\",information[index_meta]).group()[2:].lower())#1\r\n meta.append(re.search(\"(January|February|March|April|May|June|JUly|July|August|September|October|November|December)\",information[index_meta]).group().lower())#2\r\n meta.append(re.search(\"\\w \\d\\d?\",information[index_meta]).group()[2:])#3\r\n meta.append(re.search(\"\\d?\\d:\\d\\d\",information[index_meta]).group())#4\r\n meta.append(re.search(\"(AM|PM)\",information[index_meta]).group().lower())#5\r\n meta.append(re.search(\"19\\d\\d\",information[index_meta]).group())#6\r\n\r\n #build corpus\r\n corpus = set()\r\n lemmatizer = WordNetLemmatizer()\r\n for i in range(len(title)):\r\n title[i] = re.sub(\"\\(|\\)|-|\\d\\d?\\d?|:|/|\\.|`|\\?\",\" \",title[i])\r\n words = word_tokenize(title[i])\r\n for word in words:\r\n normal_word = word.lower()\r\n if normal_word not in stopwords.words(\"english\"):\r\n corpus.add(lemmatizer.lemmatize(normal_word))\r\n\r\n for i in range(len(abstract)):\r\n abstract[i] = re.sub(\"\\(|\\)|-|\\d\\d?\\d?|:|/|\\.|`|\\?|,\",\" \",abstract[i])\r\n words = word_tokenize(abstract[i])\r\n for word in words:\r\n normal_word = word.lower()\r\n if normal_word not in stopwords.words(\"english\"):\r\n corpus.add(lemmatizer.lemmatize(normal_word))\r\n\r\n for i in range(len(other)):\r\n other[i] = re.sub(\"\\(|\\)|-|\\d\\d?\\d?|:|/|\\.|`|\\?|,\",\" \",other[i])\r\n words = word_tokenize(other[i])\r\n for word in words:\r\n normal_word = word.lower()\r\n if normal_word not in stopwords.words(\"english\"):\r\n corpus.add(lemmatizer.lemmatize(normal_word))\r\n\r\n corpus = list(corpus)\r\n\r\n return paper(author= author, 
other= other, metadata= meta,date = date,title = title,abstract = abstract,id=int(input_dir[-9:-5]),corpus = corpus)",
"def get_bare_file(filename):\n \"\"\" for a given entry, finds all of the info we want to display \"\"\"\n f = open(filename, 'r')\n str = f.read()\n str = str.decode('utf-8')\n e = {}\n try: e['title'] = re.search('(?<=title:)(.)*', str).group()\n except: pass\n try: e['slug'] = re.search('(?<=slug:)(.)*', str).group()\n except: pass\n try: e['summary'] = re.search('(?<=summary:)(.)*', str).group()\n except: pass\n try:\n e['content'] =re.search('(?<=content:)((?!category:)(?!published:)(.)|(\\n))*', str).group()\n if e['content'] == None:\n e['content'] = re.search('(?<=content:)((.)|(\\n))*$', str).group()\n except:\n pass\n try:\n e['published'] = re.search('(?<=published:)(.)*', str).group()\n except: pass\n try: e['author'] = re.search('(?<=author:)(.)*', str).group()\n except: pass\n try: e['category'] = re.search('(?<=category:)(.)*', str).group()\n except: pass\n try: e['url'] = re.search('(?<=url:)(.)*', str).group()\n except: pass\n try:\n e['uid'] = re.search('(?<=u-uid:)(.)*', str)\n if e['uid']:\n e['uid'] = e['uid'].group()\n else:\n e['uid'] = re.search('(?<=u-uid)(.)*', str).group()\n except: pass\n try: e['time-zone'] = re.search('(?<=time-zone:)(.)*', str).group()\n except: pass\n try: e['location'] = re.search('(?<=location:)(.)*', str).group()\n except: pass\n try: e['syndication'] = re.search('(?<=syndication:)(.)*', str).group()\n except: pass\n try: e['location_name'] = re.search('(?<=location-name:)(.)*', str).group()\n except: pass\n try: e['in_reply_to'] = re.search('(?<=in-reply-to:)(.)*', str).group()\n except:pass\n return e",
"def _parse_details_page(self, page, kg_id):\n item = KGItem(int(kg_id))\n title = page.find(\".//title\").text.strip()\n title = H1_REXP.match(title).groups()[0]\n if \" aka \" in title:\n (item.orig_title, item.aka_title) = title.split(' aka ')[0:2]\n elif \" AKA \" in title:\n (item.orig_title, item.aka_title) = title.split(' AKA ')[0:2]\n else:\n item.orig_title = title\n item.country = page.find(\n \".//table[@class='main']/tr/td[@class='outer']/h1/img\").get(\"alt\")\n\n table = list(page.findall(\".//table[@width='750']\"))[0]\n for row in (x for x in list(table.findall('tr'))\n if len(x.getchildren()) != 1):\n rowhead = row.find(\".//td[@class='rowhead']\")\n # For some reason 'bool(rowhead)' evaluates to 'False' even if\n # it is not 'None'... Don't ask me why :-/\n if rowhead != None:\n torrent_link = row.findall(\".//a\")[0]\n torrent_name = torrent_link.text.strip()\n torrent_url = torrent_link.get('href')\n else:\n heading = row.find(\".//td[@class='heading']\").text.strip()\n if heading == 'Internet Link':\n item.imdb_id = self._get_imdb_id(row)\n elif heading == 'Director / Artist':\n item.director = unicode(row.find(\".//a\").text)\n elif heading == 'Year':\n item.year = row.find(\".//a\").text\n elif heading == 'Genres':\n item.genres = [x.text for x in row.findall(\".//a\")\n if x.text]\n elif heading == 'Language':\n item.language = row.find(\n \".//td[@align='left']\").text.strip()\n elif heading == 'Subtitles':\n # TODO: Get subtitles. How to handle included/external subs?\n pass\n elif heading == 'Source':\n try:\n item.source = row.find(\".//td[@align='left']\"\n ).text.strip()\n except AttributeError:\n item.source = None\n\n if FILENAME_REXP.match(torrent_name):\n item.files = [unicode(\n FILENAME_REXP.match(torrent_name).groups()[0])]\n else:\n torrent = self._session.get(KG_URL + torrent_url).content\n item.files = self._get_files_from_torrent(torrent)\n\n return item",
"def parse_api(self, soup):\n pdict = {}\n pdict[\"has_publication\"] = False\n pdict[\"has_print\"] = False\n self.search_doi(soup)\n\n article_meta = soup.entry\n # remove unnecessary line break\n pdict[\"abstract\"] = get_string(article_meta, \"summary\").replace(\n \"\\n\", \" \"\n )\n print(repr(article_meta.summary.get_text(strip=True)))\n # sometimes the arXiv article title has unnecessary linebreak\n pdict[\"title\"] = get_string(article_meta, \"title\").replace(\"\\n \", \"\")\n pdict[\"title_latex\"] = pdict[\"title\"]\n\n pub_date = datetime.strptime(\n article_meta.updated.string, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n pdict[\"online_year\"] = str(pub_date.year)\n pdict[\"online_month\"] = str(pub_date.month)\n pdict[\"online_day\"] = str(pub_date.day)\n\n author = []\n for name in article_meta.find_all(\"name\"):\n name_ = re.match(r\"([\\s\\S]+) (\\w+)\", name.string)\n author.append([name_.group(2), name_.group(1)])\n pdict[\"author\"] = author\n return pdict",
"def list_publications(bib_format=\"dict\"):\n\n def get_bibtex(key, value):\n total_keys = [\n \"title\",\n \"journal\",\n \"volume\",\n \"issue\",\n \"number\",\n \"pages\",\n \"numpages\",\n \"year\",\n \"month\",\n \"publisher\",\n \"url\",\n \"doi\",\n \"issn\",\n ]\n bibtex_str = (\n \"@article{\"\n + key\n + \",\\n\"\n + \" author={\"\n + \" and \".join(value[\"author\"])\n + \"},\\n\"\n )\n for key in total_keys:\n if key in value.keys():\n bibtex_str += \" \" + key + \"={\" + value[key] + \"},\\n\"\n bibtex_str += \"}\\n\"\n return bibtex_str\n\n def get_apa(value):\n apa_str = \" & \".join(value[\"author\"])\n if \"year\" in value.keys():\n apa_str += \" (\" + value[\"year\"] + \"). \"\n if \"title\" in value.keys():\n apa_str += value[\"title\"] + \". \"\n if \"journal\" in value.keys():\n apa_str += value[\"journal\"] + \", \"\n if \"volume\" in value.keys():\n apa_str += value[\"volume\"] + \", \"\n if \"pages\" in value.keys():\n apa_str += value[\"pages\"] + \". \"\n if \"doi\" in value.keys():\n apa_str += \"doi: \" + value[\"doi\"] + \"\\n\"\n return apa_str\n\n publication_dict = s.publication_lst\n if bib_format.lower() == \"dict\":\n return publication_dict\n elif bib_format.lower() == \"bibtex\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_bibtex(key, value)\n return total_str\n elif bib_format.lower() == \"apa\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_apa(value)\n return total_str\n else:\n raise ValueError(\"Supported Bibformats are ['dict', 'bibtex', 'apa']\")",
"def extract(self, data):",
"def parse_journal_article_record(root) -> dict:\n\n # print(\"Root\", root)\n # pmid = root.find(\"PMID\").text\n # print(\"PMID\", pmid)\n # quit()\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//ArticleTitle/text()\")), \"\")\n\n # TODO https:.//stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element\n atext = next(iter(root.xpath(\".//Abstract/AbstractText/text()\")), \"\")\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(doc[\"pmid\"], pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n doc[\"journal_title\"] = next(iter(root.xpath(\".//Journal/Title/text()\")), \"\")\n doc[\"joural_iso_title\"] = next(iter(root.xpath(\".//Journal/ISOAbbreviation/text()\")), \"\")\n doc[\"doi\"] = next(iter(root.xpath('.//ArticleId[@IdType=\"doi\"]/text()')), None)\n\n doc[\"compounds\"] = []\n for chem in root.xpath(\".//ChemicalList/Chemical/NameOfSubstance\"):\n chem_id = chem.get(\"UI\")\n doc[\"compounds\"].append({\"id\": f\"MESH:{chem_id}\", \"name\": chem.text})\n\n compounds = [cmpd[\"id\"] for cmpd in doc[\"compounds\"]]\n doc[\"mesh\"] = []\n for mesh in root.xpath(\".//MeshHeading/DescriptorName\"):\n mesh_id = f\"MESH:{mesh.get('UI')}\"\n if mesh_id in compounds:\n continue\n doc[\"mesh\"].append({\"id\": mesh_id, \"name\": mesh.text})\n\n return doc",
"def test_html_metadata_dc_case() -> None:\n\n snippet = \"\"\"\n <html>\n <head>\n <meta name=\"DC.Citation.Issue\" content=\"123\"/>\n </head>\n <body>Hi.</body>\n </html>\"\"\"\n\n meta = html_extract_biblio(\"http://example.org\", HTMLParser(snippet))\n assert meta is not None\n assert meta.issue == \"123\"",
"def parse_bib(filename, entry_regex, parse_func):\n\twith open(filename) as f:\n\t\treturn filter(None, (parse_func(item.group()) \n\t\t for item in re.finditer(entry_regex, f.read())))",
"def _parse_article(self, a_cookie, a_ua):\n url = \"https://seekingalpha.com/article/%s\" % self._id\n r = safe_request(url, {})\n r_login = safe_request(url, a_cookie)\n\n soup_log = BeautifulSoup(r_login.text, 'html.parser')\n # Stops process if article invalid\n primary_about = soup_log.find_all(\"a\", href=True, sasource=\"article_primary_about\")\n if len(primary_about) != 1:\n # Excludes non-single-ticker articles\n print(\"Invalid Article\")\n self.valid = False\n return\n else:\n self.ticker = primary_about[0].text.split()[-1][1:-1]\n\n # Gets all includes and author\n about = soup_log.find_all(\"a\", href=True)\n for a in about:\n if 'sasource' in a.attrs:\n if a.attrs['sasource'] == \"article_about\":\n self.includes += a.text + \",\"\n elif a.attrs['sasource'] == \"auth_header_name\":\n self.author += a.text + \",\"\n\n self.includes = self.includes[:-1]\n self.author = self.author[:-1]\n self.title = soup_log.find_all('h1')[0].text\n self.pub_date = soup_log.find_all('time', itemprop=\"datePublished\")[0]['content'][:10]\n\n # Get Full Article Text\n name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')\n print(name_box)\n try:\n disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',\n range(len(name_box))))[0]\n except IndexError:\n disc_idx = len(name_box)\n self.text = ''.join(map(lambda x: x.text + \"\\n\", name_box[:disc_idx]))",
"def parse(self, is_president):\n mp = {}\n self.content = self.page.find(id=\"content\")\n\n salutation = self.content.find(id=\"inhalt\").text.strip()\n if is_president:\n salutation = salutation[: salutation.rfind(\" - \")]\n mp[\"is_president\"] = is_president\n mp[\"salutation\"] = salutation\n mp[\"in_committees\"] = self.page.find(\"a\", href=\"#tab-Ausschuesse\") is not None\n mp.update(self._parse_picture())\n mp.update(self._parse_contact_information())\n mp.update(self._parse_biography())\n return mp",
"def getaandainfo(url, html):\n\n from bs4 import BeautifulSoup\n import re\n import datetime\n from astroph import preprint\n\n paper = preprint()\n paper.url = url\n # Remove all the muck that screws up the BeautifulSoup parser\n # Will fail on PDF submission, so take care of that exception first\n try:\n fhtml =re.sub(re.compile(\"<!--.*?-->\",re.DOTALL),\"\",html)\n soup = BeautifulSoup(fhtml)\n paper.errors = \"0\"\n except:\n paper.errors = \"1\"\n paper.title = \"Error Grabbing Title\"\n paper.author = \"Error Grabbing Authors\"\n paper.numauth = \"1\"\n paper.date = \"Error Grabbing Date\"\n paper.abstract = \"Error Grabbing Abstract\"\n paper.sources = \" \"\n paper.subject = \"Error Grabbing Subject\"\n paper.comments = \"Error Grabbing Comments\"\n return paper\n \n # Grab the paper info\n \n ## Grab title\n try:\n paper.title = str(soup.find('h2', {'class':'title'}).text.encode(\"utf-8\"))\n paper.title = paper.title.rstrip(u'⋆')\n except:\n paper.errors = \"1\"\n paper.title = \"Error Grabbing Title\"\n \n print(paper.title + '\\n')\n \n ## Grab date\n try:\n date_str = str(soup.find('p', {'class':'history'}).text.encode(\"utf-8\"))\n \n pre_date_str = 'Accepted:'\n index_pre_date_str = date_str.index(pre_date_str)\n date_str = date_str[index_pre_date_str + len(pre_date_str):]\n \n date = datetime.datetime.strptime(date_str, '%d %B %Y')\n \n paper.date = date.strftime('%d %b %Y')\n except:\n paper.errors = \"0\"\n paper.date = \"Error Grabbing Date\"\n\n # print(date_str)\n # print(paper.date)\n\n ## Grab authors\n try:\n authors = soup.findAll('span', {'class':'author'})\n paper.numauth = len(authors)\n\n ### Convert authors to strings\n author_list = []\n for author in authors:\n author_list.append(author.text.encode(\"utf-8\"))\n\n paper.author = ', '.join(author_list[0:4])\n except:\n paper.errors = \"1\"\n paper.author = \"Error Grabbing Authors\"\n\n # print(paper.author)\n\n\n ## Grab abstract\n try:\n head_soup = soup.find('div',{'id':'contenu'}).find('div',{'id':'head'})\n abstract_ps = head_soup.findAll('p', {'class':None})\n \n abstract = ''\n for abstract_p in abstract_ps:\n abstract += abstract_p.text.encode(\"utf-8\") + '\\n'\n \n paper.abstract = abstract\n except:\n paper.errors = \"1\"\n paper.abstract = \"Error Grabbing Abstract\"\n\n # print(paper.abstract)\n\n ## Grab sources\n try:\n base_url = 'http://www.aanda.org'\n \n sources_list = []\n\n PDF_link_soup = soup.find('a',{'title':re.compile('PDF*')})\n PDF_link = str(PDF_link_soup)\n if PDF_link != 'None':\n PDF_text = PDF_link_soup['title']\n PDF_link = PDF_link.replace('href=\"', 'href=\"' + base_url)\n PDF_link = PDF_link.replace(PDF_text, 'PDF')\n PDF_link = PDF_link.replace(' title=\"PDF\"', '')\n sources_list.append(PDF_link)\n \n HTML_link = str(soup.find('a',{'title':'Full HTML'}))\n if HTML_link != 'None':\n HTML_link = HTML_link.replace('href=\"', 'href=\"' + base_url)\n HTML_link = HTML_link.replace(' class=\"current_doc\"', '')\n HTML_link = HTML_link.replace('Full HTML', 'HTML')\n HTML_link = HTML_link.replace(' title=\"HTML\"', '')\n sources_list.append(HTML_link)\n\n paper.sources = ' '.join(sources_list)\n except:\n paper.errors = \"0\"\n paper.sources = \" \"\n\n # print(paper.sources)\n\n ## Grab subject\n try:\n keywords = str(soup.find('div', {'class':'kword'}).text.encode(\"utf-8\"))\n keywords = keywords.replace('Key words:', '')\n \n paper.subject = keywords\n \n except:\n paper.errors = \"1\"\n paper.subject = \"Error Grabbing Subject\"\n \n return paper",
"def parse_book_record(root) -> dict:\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//BookTitle/text()\")))\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Book/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Book/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Book/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n return doc",
"def extractInfo(article):\n headline = article['headline']['main']\n url = article['web_url']\n keywords = article['keywords']\n locations = []\n for k in keywords:\n if k['name'] == \"glocations\":\n locations += [k['value']]\n return {'headline':headline, 'url':url, 'locations':locations}"
]
| [
"0.6140276",
"0.6118735",
"0.59877324",
"0.5980667",
"0.58826834",
"0.5873166",
"0.5846001",
"0.58183736",
"0.5650632",
"0.56477314",
"0.5636212",
"0.5559242",
"0.5546218",
"0.54933405",
"0.54741406",
"0.5439656",
"0.54392403",
"0.54090035",
"0.53947794",
"0.53875345",
"0.537348",
"0.53600574",
"0.53021103",
"0.5292582",
"0.52729994",
"0.5268483",
"0.5262836",
"0.524691",
"0.523952",
"0.52275306"
]
| 0.661685 | 0 |
calculate sane rectangular coordinates inside the axis limits | def sane_rect_coord(ax, xperc=[0.1, 0.4], yperc=[0.1, 0.9]):
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xdist = xlim[1] - xlim[0]
ydist = ylim[1] - ylim[0]
xs = [xlim[0] + xperc[0] * xdist, xlim[0] + xperc[0] * xdist,
xlim[0] + xperc[1] * xdist, xlim[0] + xperc[1] * xdist]
ys = [ylim[0] + yperc[0] * ydist, ylim[0] + yperc[1] * ydist,
ylim[0] + yperc[1] * ydist, ylim[0] + yperc[0] * ydist]
return xs, ys | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def internal_bounds(self) -> tuple[float, float, float, float]:\n xres, yres = self.res\n w, s, e, n = self.bounds\n y0, y1 = (n, s) if yres < 0 else (s, n)\n x0, x1 = (e, w) if xres < 0 else (w, e)\n return x0, y0, x1, y1",
"def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly",
"def bounds(self) -> tuple[float, float, float, float]:\n transform = self.transform\n a, b, c, d, e, f, _, _, _ = transform\n if b == d == 0:\n xs = (c, c + a * self.width)\n ys = (f, f + e * self.height)\n else: # rotated\n c0x, c0y = c, f\n c1x, c1y = transform * (0, self.height)\n c2x, c2y = transform * (self.width, self.height)\n c3x, c3y = transform * (self.width, 0)\n xs = (c0x, c1x, c2x, c3x)\n ys = (c0y, c1y, c2y, c3y)\n return min(xs), min(ys), max(xs), max(ys)",
"def get_object_bounds(self):\n if len(self._object_bounds) == 0:\n # Nothing plotted yet\n return -.01, .01, -.01, .01\n xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T\n xmax = max(xmaxs.max(), xmins.max())\n xmin = min(xmins.min(), xmaxs.min())\n ymax = max(ymaxs.max(), ymins.max())\n ymin = min(ymins.min(), ymaxs.min())\n return xmin, xmax, ymin, ymax",
"def real_boundaries(self):\n return (self._points[0][1], self._points[0][3])",
"def adjust_position(self):\n\n # Adjust position for x-axis\n r = self.rect.x % 30\n if r != 0:\n if r <= 16:\n x = self.rect.x - r\n else:\n x = self.rect.x + (30 - r)\n\n else:\n x = self.rect.x\n\n # Adjust position for y-axis\n r = self.rect.y % 30\n if r != 0:\n if r <= 16:\n y = self.rect.y - r\n else:\n y = self.rect.y + (30 - r)\n else:\n y = self.rect.y\n\n return x, y",
"def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax",
"def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y",
"def calculate_leg_xy_limits(self, visualize=False):\n \n #Find the fixed plate position at the \"0\" point\n gonio_zero = copy.copy(self)\n gonio_zero.relative_sample_position = column([0.0, 0.0, 0.0]) #Tell the sample to be centered well.\n gonio_zero.getplatepos(0.0, 0.0, 0.0)\n fixed_plate_zero = np.copy(gonio_zero.fixed_plate)\n #This defines the center of the following matrices\n self.fixed_plate_zero = fixed_plate_zero\n \n #Now we generate a matrix of allowed positions around those points.\n self.leg_safe_xaxis = np.arange(-self.travel, self.travel, self.leg_safe_resolution)\n self.leg_safe_zaxis = np.copy(self.leg_safe_xaxis)\n\n #Create the \"safe zone\" array, initialized to False\n self.leg_safe_zone = np.zeros( (3, self.leg_safe_xaxis.size, self.leg_safe_zaxis.size), dtype=bool ) \n\n #Now make a reasonable approximation\n real_travel_x = 12.5\n real_travel_z = real_travel_x\n for leg in range(3):\n for i_x in range(self.leg_safe_xaxis.size):\n x = self.leg_safe_xaxis[i_x]\n if abs(x)<real_travel_x:\n for i_z in range(self.leg_safe_zaxis.size):\n z = self.leg_safe_zaxis[i_z]\n if abs(z)<real_travel_z:\n self.leg_safe_zone[leg, i_x, i_z] = True\n# #Upper left corner of leg A (0)\n# center = int(len(self.leg_safe_xaxis)/2)\n# self.leg_safe_zone[0, :, :] = False\n# self.leg_safe_zone[0, :center, :center] = True\n# self.leg_safe_zone[1, :, :] = False\n# self.leg_safe_zone[1, center:, 0:center] = True\n# self.leg_safe_zone[2, :, :center] = False\n\n\n if visualize:\n pylab.figure(0)\n pylab.hold(True)\n for leg in range(3):\n pylab.pcolor(self.leg_safe_xaxis+fixed_plate_zero[COORD_X, leg], self.leg_safe_zaxis+fixed_plate_zero[COORD_Z, leg], self.leg_safe_zone[leg, :, :].transpose())\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"Allowable XZ leg positions for the 3 legs.\")\n pylab.draw()\n pylab.axis('equal')\n #pylab.show()",
"def _boundRect(self):\n self.upperleft = list(map(min, zip(*self.addresstamp)))\n self.bottomright = list(map(max, zip(*self.addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]",
"def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz",
"def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]",
"def _compute_bounds(self, axis, view):\n return None",
"def appmag(x, xlim=(8, -4), ylim=(64, 192)):\n min_x, max_x = xlim\n min_y, max_y = ylim\n return min_y + (max_y - min_y) * (x - min_x) / (max_x - min_x)",
"def canvas_bounds(self) -> utils.BoxRegion:",
"def extents(self):\n x0, y0, width, height = self._rect_bbox\n xmin, xmax = sorted([x0, x0 + width])\n ymin, ymax = sorted([y0, y0 + height])\n return xmin, xmax, ymin, ymax",
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)",
"def bounds(self, pos):",
"def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row",
"def _rect_right(self):\n\treturn max(self.x, self.x + self.w)",
"def _xywh2min_max(box):\n x, y, w, h = box\n return np.array([x, y, x+w, y+h])",
"def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):\n\n x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max\n x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)\n if x_range > y_range:\n y_center = (y_max + y_min) / 2\n y_axis_max = y_center + x_range / 2\n y_axis_min = y_center - x_range / 2\n else:\n x_center = (x_max + x_min) / 2\n x_axis_max = x_center + y_range / 2\n x_axis_min = x_center - y_range / 2\n\n return x_axis_min, x_axis_max, y_axis_min, y_axis_max",
"def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left",
"def uv_bounds(self):\n umin, umax, vmin, vmax = breptools_UVBounds(self.topods_shape())\n bounds = Box(np.array([umin, vmin]))\n bounds.encompass_point(np.array([umax, vmax]))\n return bounds",
"def normalize_coords(xx, yy, width, height):\n xx = (2.0 / (width - 1.0)) * xx.float() - 1.0\n yy = (2.0 / (height - 1.0)) * yy.float() - 1.0\n return xx, yy",
"def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max",
"def bounds(self):\n return (\n self.x, self.y,\n self.x, self.y\n )",
"def Extrema(self):\n ymin = np.min(self._corners[:, 1])\n xmin = np.min(self._corners[:, 0])\n ymax = np.max(self._corners[:, 1])\n xmax = np.max(self._corners[:, 0])\n return ymin, xmin, ymax, xmax",
"def getBoundingBox(self):\n lX, lY = self.lX(), self.lY()\n return min(lX), min(lY), max(lX), max(lY)"
]
| [
"0.6719406",
"0.6697193",
"0.6582746",
"0.65794367",
"0.65285176",
"0.6497397",
"0.6487475",
"0.6420139",
"0.64024025",
"0.6376628",
"0.63295823",
"0.6329149",
"0.63160974",
"0.63096553",
"0.6262491",
"0.62613326",
"0.62605053",
"0.62529624",
"0.6250592",
"0.6241038",
"0.62394524",
"0.622996",
"0.6219128",
"0.619488",
"0.6192974",
"0.6189298",
"0.618127",
"0.6177603",
"0.6171144",
"0.6160766"
]
| 0.68368167 | 0 |
Returns the string representation of directions (capital X is positive x, lowercase x is negative x) |
prev_directions = ""
if self.prev_step:
prev_directions = str(self.prev_step) + ' '
direction = '?'
for scale, name in zip(self.direction, 'xyz'):
if scale == 0:
continue
if scale == 1:
direction = name.upper()
else:
direction = name
return prev_directions + direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n direction_symbol = constants._direction_number_to_direction_symbol[\n mathtools.sign(self.number)]\n return '{}{}'.format(direction_symbol, abs(self.number))",
"def get_directions():\n return [(1, 0), (0, 1), (-1, 0), (0, -1)]",
"def get_direction_letters(d):\n d_letters = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')\n x = int((d + 11.25) / 22.5) % 16\n return d_letters[x]",
"def legal_moves(self):\n moves = \"\"\n swappable = self.swappable_positions\n empty_position = self.get_position(0)\n\n for s in swappable:\n pos_diff = empty_position[0] - s[0], empty_position[1] - s[1]\n if pos_diff[0] > 0:\n moves += \"U\"\n elif pos_diff[0] < 0:\n moves += \"D\"\n elif pos_diff[1] > 0:\n moves += \"L\"\n elif pos_diff[1] < 0:\n moves += \"R\"\n\n return moves",
"def find_opposite(direction:str):\r\n if direction == 'u':\r\n return 'd'\r\n if direction == 'l':\r\n return 'r'\r\n if direction == 'r':\r\n return 'l'\r\n if direction == 'd':\r\n return 'u'",
"def _dir2str(direct):\n if direct == UP:\n return \"UP\"\n elif direct == DOWN:\n return \"DOWN\"\n elif direct == LEFT:\n return \"LEFT\"\n elif direct == RIGHT:\n return \"RIGHT\"\n else:\n return \"INVALID DIRECTION\"",
"async def direction(self, value) -> str:\n if value is None:\n return \"N\"\n\n direction_array = [\n \"N\",\n \"NNE\",\n \"NE\",\n \"ENE\",\n \"E\",\n \"ESE\",\n \"SE\",\n \"SSE\",\n \"S\",\n \"SSW\",\n \"SW\",\n \"WSW\",\n \"W\",\n \"WNW\",\n \"NW\",\n \"NNW\",\n \"N\",\n ]\n direction_str = direction_array[int((value + 11.25) / 22.5)]\n return self._translations[\"wind_dir\"][direction_str]",
"def possible_directions(valid_positions):\n if valid_positions == \"n\":\n print(\"You can travel: (N)orth.\")\n elif valid_positions == \"nes\":\n print(\"You can travel: (N)orth or (E)ast or (S)outh.\")\n elif valid_positions == \"es\":\n print(\"You can travel: (E)ast or (S)outh.\")\n elif valid_positions == \"sw\":\n print(\"You can travel: (S)outh or (W)est.\")\n elif valid_positions == \"ew\":\n print(\"You can travel: (E)ast or (W)est.\")\n elif valid_positions == \"ns\":\n print(\"You can travel: (N)orth or (S)outh.\")",
"def getDirection(a):\n try:\n if (int(a) == 0): return \"N\"\n elif (int(a) == 1): return \"S\"\n elif (int(a) == 2): return \"E\"\n elif (int(a) == 3): return \"W\"\n else: raise Exception(\"Invalid Action\")\n except Exception as err:\n print(err)\n exit()",
"def text_direction(self) -> str:\n return ''.join(word[0] for word in self.character_order.split('-'))",
"def choose_direction(direction, my_matrix):\r\n final_str_lst = []\r\n direction_set = set(direction) # converts the directions to a set in order\r\n # to eliminate double letters\r\n for char in direction_set:\r\n if char == 'u':\r\n strs_in_direction = down_to_up(my_matrix)\r\n elif char == 'd':\r\n strs_in_direction = up_to_down(my_matrix)\r\n elif char == 'r':\r\n strs_in_direction = left_to_right(my_matrix)\r\n elif char == 'l':\r\n strs_in_direction = right_to_left(my_matrix)\r\n elif char == 'w':\r\n strs_in_direction = lower_left_to_right(my_matrix)\r\n elif char == 'x':\r\n strs_in_direction = lower_right_to_left(my_matrix)\r\n elif char == 'y':\r\n strs_in_direction = upper_left_to_right(my_matrix)\r\n elif char == 'z':\r\n strs_in_direction = upper_right_to_left(my_matrix)\r\n for string in strs_in_direction:\r\n final_str_lst.append(string)\r\n return final_str_lst",
"def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")",
"def translate_direction(self):\n xpart = math.sin(self.direction)\n ypart = math.cos(self.direction)\n if ypart > 0:\n print(\"oben \", end='')\n else:\n print(\"unten \", end='')\n if xpart > 0:\n print(\"rechts\")\n else:\n print(\"links\")",
"def get_direction(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\tif diff_y != 0 and diff_x != 0:\r\n\t\treturn 'diagonal'\r\n\r\n\treturn 'vertical' if diff_y != 0 else 'horizontal'",
"def turn_clockwise(a):\r\n if a==\"N\":\r\n return \"E\"\r\n elif a==\"E\":\r\n return \"S\"\r\n elif a==\"S\":\r\n return \"W\"\r\n elif a==\"W\":\r\n return \"N\"",
"def create_directions_ingoing(self, pos=None):\n if pos is None:\n pos = self.draw.position\n all_directions = ((1, 0), (-1, 0), (0, 1), (0, -1), (-1, -1), (-1, 1), (1, 1), (1, -1))\n\n # filter returns an iterator, we have to materialize the filter and turn it into a tuple (or list or whatever...)\n return tuple(filter(lambda x: self.check_position_exists(pos.step(x)), all_directions))\n # all directions used to be a list, now it is a tuple. Seems to work fine...\n # filter needs the data to be filtered as a iterable container",
"def get_direction(curr_pos, next_pos):\n if curr_pos == next_pos:\n return 'CLEAN'\n\n v_dist = next_pos[0] - curr_pos[0]\n h_dist = next_pos[1] - curr_pos[1]\n\n if h_dist != 0:\n if h_dist < 0:\n return 'LEFT'\n else:\n return 'RIGHT'\n else:\n if v_dist < 0:\n return 'UP'\n else:\n return 'DOWN'",
"def __str__(self):\n output = \"0->\"\n for c in self.route:\n output += str(c) + \"->\"\n output += \"0\"\n return output",
"def get_direction(self):\n return self.actual_coordinates[2]",
"def direction(self) -> str:\n return pulumi.get(self, \"direction\")",
"def travel(direction, x, y):\n x_new = x\n y_new = y\n for i in range(len(direction)):\n test = direction[i].lower()\n if test == 'n':\n y_new += 1\n elif test == 's':\n y_new -= 1\n elif test == 'e':\n x_new += 1\n elif test == 'w':\n x_new -= 1\n return (x_new, y_new)",
"def strand_string(self):\n if self.is_forward():\n return '+'\n if self.is_reverse():\n return '-'\n return '.'",
"def valid_directions(x,y):\n North, South, West, East = walls(x,y)\n print ('You can travel: ',end='')\n if North:\n print ('(N)orth' ,end='')\n if East or West or South:\n print (' or' ,end=' ')\n if East:\n print ('(E)ast', end='')\n if West or South:\n print (' or' ,end=' ')\n if South:\n print('(S)outh', end='')\n if West:\n print (' or' ,end=' ')\n if West:\n print ('(W)est',end='')\n print('.')\n return North, South, West, East",
"def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction",
"def directions(self):\n return self.piece_behavior.directions",
"def __convert_to_turn(self, dir):\n if dir == 0: # up\n if self.orientation == 1:\n self.orientation = 0\n return ['L']\n elif self.orientation == 3:\n self.orientation = 0\n return ['R']\n elif self.orientation == 2:\n self.orientation = 0\n return ['L', 'L']\n elif self.orientation == 0:\n return []\n elif dir == 1: # right\n if self.orientation == 1:\n return []\n elif self.orientation == 3:\n self.orientation = 1\n return ['L', 'L']\n elif self.orientation == 2:\n self.orientation = 1\n return ['L']\n elif self.orientation == 0:\n self.orientation = 1\n return ['R']\n elif dir == 2: # down\n if self.orientation == 1:\n self.orientation = 2\n return ['R']\n elif self.orientation == 3:\n self.orientation = 2\n return ['L']\n elif self.orientation == 2:\n return []\n elif self.orientation == 0:\n self.orientation = 2\n return ['L', 'L']\n elif dir == 3: # left\n if self.orientation == 1:\n self.orientation = 3\n return ['L', 'L']\n elif self.orientation == 3:\n return []\n elif self.orientation == 2:\n self.orientation = 3\n return ['R']\n elif self.orientation == 0:\n self.orientation = 3\n return ['L']",
"def possible_way(self, pos:tuple, direction:str, ispac = True):\r\n x_offset, y_offset = 0,0\r\n # Check the four different directions \r\n if direction == 'u':\r\n y_offset = -1\r\n elif direction == 'd':\r\n y_offset = 1\r\n elif direction == 'r':\r\n x_offset = 1\r\n elif direction == 'l':\r\n x_offset = -1\r\n x = pos[0] // self.grid_size\r\n y = pos[1] // self.grid_size\r\n # If the x position is out of the gamefield that means the figure is at the end of the tunnel\r\n if x + x_offset >= len(self.look_up_table[1]) or x + x_offset < 0:\r\n return 'os' #< os: other side -> The figure has to spwan at the other side of the gamefield\r\n # Get the value from the look up table\r\n value = self.look_up_table[y + y_offset][x + x_offset] \r\n # Check if the value is a dot or an Energizer \r\n if value != None and (value[0] =='p' or value[0] == 'e') and ispac:\r\n # Check if the end of the value field is a 'n' (not). The 'n' shouldn't remove from the gamefield.\r\n if value[-1] == 'n':\r\n self.look_up_table[y + y_offset][x + x_offset] = 'n'\r\n else:\r\n # Remove the dot or the energizer from the gamefield if Pac-Man eats them.\r\n self.look_up_table[y + y_offset][x + x_offset] = None\r\n return value",
"def ApplyDirection(pos, direction):\n\n row = pos // 3\n col = pos % 3\n\n row += direction[0]\n\n if row < 0 or row > 2:\n return -1\n\n col += direction[1]\n\n if col < 0 or col > 2:\n return -1\n\n return int(row * 3 + col)",
"def flip_direction(direction):\n if direction==\"NORTH\": return \"SOUTH\"\n if direction==\"SOUTH\": return \"NORTH\"\n if direction==\"WEST\": return \"EAST\"\n if direction==\"EAST\": return \"WEST\"\n elif isinstance(direction, float):\n return (direction + np.pi)%(2*np.pi)",
"def str_ij(d,db,i,j, **kwargs):\n style = kwargs.get(\"style\", None)\n if style == \"plain\":\n d2s = {\"N\":\"^\", \"W\":\"<\", \"NW\":\"\\\\\"}\n else:\n d2s = {\"N\":\"\\u2191\", \"W\":\"\\u2190\", \"NW\":\"\\u2196\"}\n direction = \"\"\n if (i,j) in db:\n direction = \"\".join([d2s[x] for x in db[i,j]])+\" \"\n return direction+str(d[i,j])"
]
| [
"0.66820127",
"0.6573184",
"0.6548901",
"0.64026684",
"0.61723566",
"0.6157513",
"0.6071772",
"0.60612214",
"0.593492",
"0.59305704",
"0.5926606",
"0.5891758",
"0.58892643",
"0.58686227",
"0.58390796",
"0.58159703",
"0.5814856",
"0.5676596",
"0.5657164",
"0.5651279",
"0.5647685",
"0.56296235",
"0.562708",
"0.5609726",
"0.56041133",
"0.5597715",
"0.5576226",
"0.55382985",
"0.55346733",
"0.55242926"
]
| 0.71011275 | 0 |
Create and save a species dict for convenience |
data = pd.read_csv("../train.csv")
species = sorted(data.species.unique())
species_dict = {species: index for index, species in enumerate(species)}
return species_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_dict(self):\n species_dict = dict()\n species_dict['force_field'] = self.force_field\n species_dict['is_ts'] = self.is_ts\n if self.e_elect is not None:\n species_dict['e_elect'] = self.e_elect\n if self.e0 is not None:\n species_dict['e0'] = self.e0\n species_dict['arkane_file'] = self.arkane_file\n if self.yml_path is not None:\n species_dict['yml_path'] = self.yml_path\n if self.is_ts:\n species_dict['ts_methods'] = self.ts_methods\n species_dict['ts_guesses'] = [tsg.as_dict() for tsg in self.ts_guesses]\n species_dict['ts_conf_spawned'] = self.ts_conf_spawned\n species_dict['ts_number'] = self.ts_number\n species_dict['ts_report'] = self.ts_report\n species_dict['rxn_label'] = self.rxn_label\n species_dict['successful_methods'] = self.successful_methods\n species_dict['unsuccessful_methods'] = self.unsuccessful_methods\n species_dict['chosen_ts_method'] = self.chosen_ts_method\n species_dict['chosen_ts'] = self.chosen_ts\n if self.run_time is not None:\n species_dict['run_time'] = self.run_time.total_seconds()\n species_dict['t1'] = self.t1\n species_dict['label'] = self.label\n species_dict['long_thermo_description'] = self.long_thermo_description\n species_dict['multiplicity'] = self.multiplicity\n if self.number_of_radicals is not None:\n species_dict['number_of_radicals'] = self.number_of_radicals\n species_dict['charge'] = self.charge\n species_dict['generate_thermo'] = self.generate_thermo\n if self.opt_level is not None:\n species_dict['opt_level'] = self.opt_level\n if self.final_xyz is not None:\n species_dict['final_xyz'] = self.final_xyz\n species_dict['number_of_rotors'] = self.number_of_rotors\n species_dict['rotors_dict'] = self.rotors_dict\n species_dict['external_symmetry'] = self.external_symmetry\n species_dict['optical_isomers'] = self.optical_isomers\n species_dict['neg_freqs_trshed'] = self.neg_freqs_trshed\n if self.conf_is_isomorphic is not None:\n species_dict['conf_is_isomorphic'] = self.conf_is_isomorphic\n if self.bond_corrections is not None:\n species_dict['bond_corrections'] = self.bond_corrections\n if self.mol is not None:\n species_dict['mol'] = self.mol.toAdjacencyList()\n if self.initial_xyz is not None:\n species_dict['initial_xyz'] = self.initial_xyz\n if self.checkfile is not None:\n species_dict['checkfile'] = self.checkfile\n if self.most_stable_conformer is not None:\n species_dict['most_stable_conformer'] = self.most_stable_conformer\n if self.cheap_conformer is not None:\n species_dict['cheap_conformer'] = self.cheap_conformer\n if self.recent_md_conformer is not None:\n species_dict['recent_md_conformer'] = self.recent_md_conformer\n if self.svpfit_output_file is not None:\n species_dict['svpfit_output_file'] = self.svpfit_output_file\n if self._radius is not None:\n species_dict['radius'] = self._radius\n if self.conformers:\n species_dict['conformers'] = self.conformers\n species_dict['conformer_energies'] = self.conformer_energies\n if self.conformers_before_opt is not None:\n species_dict['conformers_before_opt'] = self.conformers_before_opt\n if self.bdes is not None:\n species_dict['bdes'] = self.bdes\n return species_dict",
"def species_table(self):\n if self.hdf5_data is None:\n return None\n species_section = self.hdf5_data.get('/species', None)\n if species_section is None:\n return None\n return dict(\n (id, dict(name=name, radius=radius, D=D))\n for id, name, radius, D in species_section.value)",
"def make_save(self):\n\t\tsave = {}\n\t\tsave['p'] = self.p\n\t\tsave['injail'] = self.injail.copy()\n\t\tsave['tile'] = self.tile.copy()\n\t\tsave['bal'] = self.bal.copy()\n\t\tsave['goojf'] = self.goojf.copy()\n\t\tsave['isalive'] = self.isalive.copy()\n\t\tsave['jailturn'] = self.jailturn.copy()\n\t\tsave['ownedby'] = self.ownedby.copy()\n\t\tsave['numhouse'] = self.numhouse.copy()\n\t\tsave['ismortgaged'] = self.ismortgaged.copy()\n\t\tsave['num'] = self.num\n\t\tsave['numalive'] = self.numalive\n\t\tsave['uid'] = self.uid.copy()\n\t\tsave['freeparkingsum'] = self.freeparkingsum\n\t\tself.autosave = save",
"def __init__(self, taxid, species_name = None, lineage=None):\n self.genes = dict()\n self.taxid = taxid\n self.species = species_name\n self.lineage = lineage",
"def divide_to_species(self):\n titles = []\n for i in self.rest:\n titles.append(i.title.split(\" \"))\n for i in range(len(titles)):\n for j in range(i, len(titles)):\n if titles[i][0] == titles[j][0] and titles[i][1] == titles[j][1]:\n if \" \".join(titles[i]) not in [z.title for z in self.species[\" \".join(titles[i][:2])]]:\n self.rest[i].species = \" \".join(titles[i])\n self.species[\" \".join(titles[i][:2])].append(self.rest[i])\n if \" \".join(titles[j]) not in [z.title for z in self.species[\" \".join(titles[j][:2])]]:\n self.rest[j].species = \" \".join(titles[j])\n self.species[\" \".join(titles[j][:2])].append(self.rest[j])\n\n self.name_of_species = list(self.species.keys())\n\n for i in self.species.keys():\n self.count_species[i] = len(self.species[i])",
"def load_individual_species():\n\n print (\"individual species\")\n\n SpeciesIndividual.query.delete()\n\n with open(\"seed_data/species_seed.psv\") as species:\n for row in species:\n species_name, group_id = row.strip().split(\"|\")\n\n species = SpeciesIndividual(species_name=species_name,\n species_group_id=group_id)\n\n db.session.add(species)\n\n db.session.commit()",
"def _species(self, hdr):\n # Called PolyAtomic in OpenMIMS source\n d = {}\n\n d['numeric flag'], d['numeric value'], d['elements'], \\\n d['charges'], d['charge label'], d['label'] = \\\n unpack(self._bo + '4i c 64s', hdr.read(81))\n\n d['label'] = self._cleanup_string(d['label'])\n d['charge label'] = self._cleanup_string(d['charge label'])\n\n # OpenMIMS says 3 bytes AFTER el.table are unused; this is wrong,\n # 3 bytes BEFORE el.table (b 81-84) are unused. n_elements (here:\n # atomic number) is element number in periodic table rather than\n # number of elements. n_isotopes (here: isotope number) is offset from\n # main atomic Z number. Also: collapse ElementTable (Tabelts) into\n # main dict, too many layers.\n hdr.seek(3, 1)\n atoms = unpack(self._bo + '15i', hdr.read(60))\n d['atomic number'] = tuple(n for n in atoms[::3])\n d['isotope number'] = tuple(n for n in atoms[1::3])\n d['stoich number'] = tuple(n for n in atoms[2::3])\n return d",
"def createSpecies(self):\n return _libsbml.Model_createSpecies(self)",
"def set_species(self, species):\n self.species = species",
"def test_speciesCreation():\n \n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n sys.addSpecies('fox')\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInitialCond('rabbit', 10)\n sys.setInitialCond('fox', 5)\n sys.setGrowthRate('rabbit', 1)\n sys.setGrowthRate('fox', -1)\n sys.setCarrCap('rabbit', 10000)\n sys.setCarrCap('fox', 10000)\n sys.setChangeRate('rabbit', 10)\n sys.setChangeRate('fox', 20) \n \n assert len(sys.species_list) == 2\n assert sys.species_list == ['rabbit','fox']\n assert sys.intMatrix == {('rabbit','fox'):-1, ('fox','rabbit'):1}\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')",
"def testMakeNewSpecies(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]')]\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs)) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]'),\n Species().fromSMILES('CC')]#duplicate species\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs) - 1)",
"def test_make_new_species(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]')]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs))\n self.assertEquals(len(cerm.index_species_dict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('CC')] # duplicate species\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs) - 1)\n self.assertEquals(len(cerm.index_species_dict), len(spcs) - 1)",
"def init(sFileName, sDescription):\n \n try:\n with open(sFileName) as f:\n my_dict = json.load(f)\n \n \n except:\n \n #assume there was an error, possibly the file does not exist\n my_dict = {'descriptor':sDescription ,'measurements':[]}\n with open (sFileName, 'w') as f:\n json.dump(my_dict,f)\n \n return my_dict",
"def speciate(self):\n\n\n # Clear out the previous generation\n for spec in self.species.values():\n spec.champ = spec.get_champion()\n spec.flush()\n\n for genome in self.all_genomes:\n if genome.species_hint is not None:\n spec = self.species.get(genome.species_hint)\n if spec and spec.is_compatible(genome):\n spec.add_genome(genome)\n continue\n\n for spec in self.species.values():\n # check compatibility until found\n if spec.is_compatible(genome):\n spec.add_genome(genome)\n break\n else: # make a new spec\n spec_num = self.get_next_species_num()\n spec = Species(self, spec_num)\n spec.add_genome(genome)\n spec.champ = genome\n self.species[spec_num] = spec\n\n # Delete unnecessary species\n for spec_num, spec in list(self.species.items()):\n if len(spec)==0:\n self.species.pop(spec_num)",
"def saveDict(self):\n sd = dict()\n sd[\"name\"] = self.name\n sd[\"driverName\"] = self.driverName\n sd[\"optDriverName\"] = self.optDriverName\n sd[\"auxDriverName\"] = self.auxDriverName\n sd[\"runType\"] = self.runType\n sd[\"inputNames\"] = self.inputNames\n sd[\"outputNames\"] = self.outputNames\n sd[\"inputTypes\"] = self.inputTypes\n sd[\"inputMins\"] = self.inputMins.tolist()\n sd[\"inputMaxs\"] = self.inputMaxs.tolist()\n sd[\"inputDists\"] = []\n\n if self.inputDists:\n for dist in self.inputDists:\n sd[\"inputDists\"].append(dist.saveDict())\n\n sd[\"inputDefaults\"] = self.inputDefaults.tolist()\n sd[\"outputSelections\"] = self.outputSelections\n sd[\"emulatorOutputStats\"] = self.emulatorOutputStatus\n sd[\"emulatorTrainingFile\"] = self.emulatorTrainingFile\n sd[\"namesIncludeNodes\"] = self.namesIncludeNodes\n sd[\"inputsFlowsheetFixed\"] = self.flowsheetFixed\n return sd",
"def getSpeciesIds(self):\n species = {}\n result_args = self.cursor.callproc(\"get_all_species\")\n # process the result\n for result in self.cursor.stored_results():\n for r in result:\n # print(r)\n species[r[1]] = r[0]\n\n return species",
"def from_dict(self, species_dict):\n try:\n self.label = species_dict['label']\n except KeyError:\n raise InputError('All species must have a label')\n self.run_time = datetime.timedelta(seconds=species_dict['run_time']) if 'run_time' in species_dict else None\n self.t1 = species_dict['t1'] if 't1' in species_dict else None\n self.e_elect = species_dict['e_elect'] if 'e_elect' in species_dict else None\n self.e0 = species_dict['e0'] if 'e0' in species_dict else None\n self.arkane_file = species_dict['arkane_file'] if 'arkane_file' in species_dict else None\n self.yml_path = species_dict['yml_path'] if 'yml_path' in species_dict else None\n self.rxn_label = species_dict['rxn_label'] if 'rxn_label' in species_dict else None\n self._radius = species_dict['radius'] if 'radius' in species_dict else None\n self.most_stable_conformer = species_dict['most_stable_conformer'] if 'most_stable_conformer'\\\n in species_dict else None\n self.cheap_conformer = species_dict['cheap_conformer'] if 'cheap_conformer' in species_dict else None\n self.recent_md_conformer = species_dict['recent_md_conformer']\\\n if 'recent_md_conformer' in species_dict else None\n self.force_field = species_dict['force_field'] if 'force_field' in species_dict else 'MMFF94'\n self.svpfit_output_file = species_dict['svpfit_output_file'] if 'svpfit_output_file' in species_dict else None\n self.long_thermo_description = species_dict['long_thermo_description']\\\n if 'long_thermo_description' in species_dict else ''\n self.initial_xyz = standardize_xyz_string(species_dict['initial_xyz']) if 'initial_xyz' in species_dict\\\n else None\n self.final_xyz = standardize_xyz_string(species_dict['final_xyz']) if 'final_xyz' in species_dict else None\n self.conf_is_isomorphic = species_dict['conf_is_isomorphic'] if 'conf_is_isomorphic' in species_dict else None\n self.is_ts = species_dict['is_ts'] if 'is_ts' in species_dict else False\n if self.is_ts:\n self.ts_conf_spawned = species_dict['ts_conf_spawned'] if 'ts_conf_spawned' in species_dict else False\n self.ts_number = species_dict['ts_number'] if 'ts_number' in species_dict else None\n self.ts_report = species_dict['ts_report'] if 'ts_report' in species_dict else ''\n ts_methods = species_dict['ts_methods'] if 'ts_methods' in species_dict else None\n if ts_methods is None:\n self.ts_methods = default_ts_methods\n elif isinstance(ts_methods, list):\n self.ts_methods = ts_methods\n if not self.ts_methods:\n self.ts_methods = ['user guess']\n else:\n raise TSError('ts_methods must be a list, got {0} of type {1}'.format(ts_methods, type(ts_methods)))\n self.ts_guesses = [TSGuess(ts_dict=tsg) for tsg in species_dict['ts_guesses']]\\\n if 'ts_guesses' in species_dict else list()\n self.successful_methods = species_dict['successful_methods']\\\n if 'successful_methods' in species_dict else list()\n self.unsuccessful_methods = species_dict['unsuccessful_methods']\\\n if 'unsuccessful_methods' in species_dict else list()\n self.chosen_ts_method = species_dict['chosen_ts_method'] if 'chosen_ts_method' in species_dict else None\n self.chosen_ts = species_dict['chosen_ts'] if 'chosen_ts' in species_dict else None\n self.checkfile = species_dict['checkfile'] if 'checkfile' in species_dict else None\n else:\n self.ts_methods = None\n if 'xyz' in species_dict and self.initial_xyz is None and self.final_xyz is None:\n self.process_xyz(species_dict['xyz'])\n for char in self.label:\n if char not in valid_chars:\n raise SpeciesError('Species label {0} contains an invalid character: 
\"{1}\"'.format(self.label, char))\n self.multiplicity = species_dict['multiplicity'] if 'multiplicity' in species_dict else None\n self.charge = species_dict['charge'] if 'charge' in species_dict else 0\n if 'charge' not in species_dict:\n logger.debug('No charge specified for {0}, assuming charge 0.'.format(self.label))\n if self.is_ts:\n self.generate_thermo = False\n else:\n self.generate_thermo = species_dict['generate_thermo'] if 'generate_thermo' in species_dict else True\n self.number_of_radicals = species_dict['number_of_radicals'] if 'number_of_radicals' in species_dict else None\n self.opt_level = species_dict['opt_level'] if 'opt_level' in species_dict else None\n self.number_of_rotors = species_dict['number_of_rotors'] if 'number_of_rotors' in species_dict else 0\n self.rotors_dict = species_dict['rotors_dict'] if 'rotors_dict' in species_dict else dict()\n self.external_symmetry = species_dict['external_symmetry'] if 'external_symmetry' in species_dict else None\n self.optical_isomers = species_dict['optical_isomers'] if 'optical_isomers' in species_dict else None\n self.neg_freqs_trshed = species_dict['neg_freqs_trshed'] if 'neg_freqs_trshed' in species_dict else list()\n self.bond_corrections = species_dict['bond_corrections'] if 'bond_corrections' in species_dict else dict()\n try:\n self.mol = Molecule().fromAdjacencyList(str(species_dict['mol'])) if 'mol' in species_dict else None\n except (ValueError, InvalidAdjacencyListError) as e:\n logger.error('Could not read RMG adjacency list {0}. Got:\\n{1}'.format(species_dict['mol'] if 'mol'\n in species_dict else None,\n e.message))\n self.mol = None\n smiles = species_dict['smiles'] if 'smiles' in species_dict else None\n inchi = species_dict['inchi'] if 'inchi' in species_dict else None\n adjlist = species_dict['adjlist'] if 'adjlist' in species_dict else None\n if self.mol is None:\n if adjlist is not None:\n self.mol = Molecule().fromAdjacencyList(adjlist=adjlist)\n elif inchi is not None:\n self.mol = rmg_mol_from_inchi(inchi)\n elif smiles is not None:\n self.mol = Molecule(SMILES=smiles)\n if self.mol is None and not self.is_ts:\n self.mol_from_xyz()\n if self.mol is not None:\n if 'bond_corrections' not in species_dict:\n self.bond_corrections = enumerate_bonds(self.mol)\n if self.bond_corrections:\n self.long_thermo_description += 'Bond corrections: {0}\\n'.format(self.bond_corrections)\n if self.multiplicity is None:\n self.multiplicity = self.mol.multiplicity\n if self.charge is None:\n self.charge = self.mol.getNetCharge()\n if self.mol_list is None:\n if not self.charge:\n self.mol_list = self.mol.generate_resonance_structures(keep_isomorphic=False,\n filter_structures=True)\n if 'conformers' in species_dict:\n self.conformers = species_dict['conformers']\n self.conformer_energies = species_dict['conformer_energies'] if 'conformer_energies' in species_dict\\\n else [None] * len(self.conformers)\n if self.mol is None and self.initial_xyz is None and self.final_xyz is None and not self.conformers\\\n and not any([tsg.xyz for tsg in self.ts_guesses]):\n # TS species are allowed to be loaded w/o a structure\n raise SpeciesError('Must have either mol or xyz for species {0}'.format(self.label))\n self.bdes = species_dict['bdes'] if 'bdes' in species_dict else None\n if self.bdes is not None and not isinstance(self.bdes, list):\n raise SpeciesError('The .bdes argument must be a list, got {0} which is a {1}'.format(\n self.bdes, type(self.bdes)))\n\n self.conformers_before_opt = species_dict['conformers_before_opt'] \\\n 
if 'conformers_before_opt' in species_dict else None",
"def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict",
"def make():\n data = {}\n data.update({'earth' : {'description': 'Planet with 20% O2 with 75% of surface covered by H2O. Humans inhabitants enjoy both of these aspects.',\n 'order' : 1,\n 'type': 'planet',\n }})\n \n return data",
"def _store(self):\n store_dict = {}\n for key in self._data:\n val = self._data[key]\n if SparseParameter._is_supported_matrix(val):\n serial_string = SparseParameter._serialize_matrix(val)\n store_dict[\"%s%s\" % (key, SparseParameter.IDENTIFIER)] = serial_string\n else:\n store_dict[key] = val\n\n return store_dict",
"def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')",
"def store_state():\n global states\n dmxes = request.args.get(\"dmxes\").split(\",\")\n pos = int(request.args.get(\"position\", default=-1))\n name = request.args.get(\"name\", default='')\n with open('data/states.json', 'r') as f:\n states = json.loads(f.read())\n savestate = {\"name\":name}\n for dmx in dmxes:\n if \"-\" in dmx:\n print dmx\n fromto = dmx.split(\"-\")\n for addr in range(int(fromto[0]), int(fromto[1])+1):\n print addr\n savestate[addr] = adresses[int(addr)]\n else:\n savestate[dmx] = adresses[int(dmx)]\n print str(len(states)) + \":\" + str(pos)\n if pos >= 0 and pos < len(states):\n states[pos] = savestate\n else:\n states.append(savestate)\n pos = len(states)-1\n with open('data/states.json', 'w') as f:\n f.write(json.dumps(states))\n return json_back()",
"def initialize_output_dict(self, label: Optional[str] = None):\n if label is not None or not self._does_output_dict_contain_info():\n for species in self.species_list:\n if label is None or species.label == label:\n if species.label not in self.output:\n self.output[species.label] = dict()\n if 'paths' not in self.output[species.label]:\n self.output[species.label]['paths'] = dict()\n path_keys = ['geo', 'freq', 'sp', 'composite']\n for key in path_keys:\n if key not in self.output[species.label]['paths']:\n self.output[species.label]['paths'][key] = ''\n if 'irc' not in self.output[species.label]['paths'] and species.is_ts:\n self.output[species.label]['paths']['irc'] = list()\n if 'job_types' not in self.output[species.label]:\n self.output[species.label]['job_types'] = dict()\n for job_type in list(set(self.job_types.keys())) + ['opt', 'freq', 'sp', 'composite', 'onedmin']:\n if job_type in ['rotors', 'bde']:\n # rotors could be invalidated due to many reasons,\n # also could be falsely identified in a species that has no torsional modes.\n self.output[species.label]['job_types'][job_type] = True\n else:\n self.output[species.label]['job_types'][job_type] = False\n keys = ['conformers', 'isomorphism', 'convergence', 'restart', 'errors', 'warnings', 'info']\n for key in keys:\n if key not in self.output[species.label]:\n if key == 'convergence':\n self.output[species.label][key] = None\n else:\n self.output[species.label][key] = ''",
"def createSpeciesType(self):\n return _libsbml.Model_createSpeciesType(self)",
"def save_data():\n new_data = {}\n new_data['data sources'] = []\n new_data['metros'] = []\n new_data['routes'] = []\n for code in set_of_cities:\n city = set_of_cities[code]\n data = {}\n data['code'] = city.code\n data['name'] = city.name\n data['country'] = city.country\n data['continent'] = city.continent\n data['timezone'] = city.timezone\n data['coordinates'] = city.coordinates\n data['population'] = city.population\n data['region'] = city.region\n new_data['metros'].append(data)\n for route in set_of_route:\n code_of_cities = route.code_of_cities\n distance = route.distance\n data = {}\n data['ports'] = code_of_cities\n data['distance'] = distance\n new_data['routes'].append(data)\n\n with open(\"new_data.json\", 'wb') as outfile:\n json.dump(new_data, outfile, sort_keys=True, indent=4, separators=(',', ':'))\n\n print \"Changes saved in new_data.json\"",
"def save(self):\n if self._mode == 'dict':\n self._mode = 'shelve'\n self._shelve_mode = 'c'\n\n for key, value in self._dict.items():\n ckey = copy.copy(key)\n cvalue = copy.copy(value)\n self.add(ckey, cvalue, 'shelve', check=False)\n\n self._dict.clear()\n\n if self._mode == 'dict':\n self._mode = 'dict'\n self._shelve_mode = 'r'",
"def toDict(self):\n \n d = {}\n d['sp'] = self.species\n d['gns'] = self.genera\n d['fam'] = self.families\n d['ord'] = self.orders\n d['cls'] = self.classes\n d['phy'] = self.phyla\n d['kng'] = self.kingdoms\n \n return d",
"def save(self):\r\n for obs_name in self.__dict__.keys():\r\n if obs_name is not \"_ObjetSimu__obs\":\r\n if not obs_name in self.__sous_objets:\r\n if obs_name in self.__obs.keys():\r\n if \"copy\" in dir(self.__dict__[obs_name]):\r\n self.__obs[obs_name].append(self.__dict__[obs_name].copy())\r\n else:\r\n self.__obs[obs_name].append(self.__dict__[obs_name])\r\n else:\r\n self.__dict__[obs_name].save()",
"def save_track_to_dict(self, filename):\n\n # save track data\n track_dict = {\n 'size': self.size,\n 'points': self.points,\n 'pivots': self.pivots,\n 'seed': self.noise_seed,\n 'data': self.track_data\n }\n\n np.save(\"./static/\" + filename, track_dict)\n\n print(\"Track saved successfully\")",
"def _store(self):\n store_dict = {}\n\n if self._data is not None:\n dump = pickle.dumps(self._data, protocol=self.v_protocol)\n store_dict[\"data\"] = dump\n store_dict[PickleParameter.PROTOCOL] = self.v_protocol\n\n if self.f_has_range():\n\n store_dict[\"explored_data\"] = ObjectTable(\n columns=[\"idx\"], index=list(range(len(self)))\n )\n\n smart_dict = {}\n count = 0\n\n for idx, val in enumerate(self._explored_range):\n\n obj_id = id(val)\n\n if obj_id in smart_dict:\n name_id = smart_dict[obj_id]\n add = False\n else:\n name_id = count\n add = True\n\n name = self._build_name(name_id)\n store_dict[\"explored_data\"][\"idx\"][idx] = name_id\n\n if add:\n store_dict[name] = pickle.dumps(val, protocol=self.v_protocol)\n smart_dict[obj_id] = name_id\n count += 1\n\n self._locked = True\n\n return store_dict"
]
| [
"0.71935356",
"0.61733454",
"0.59725386",
"0.59544873",
"0.5883405",
"0.5852764",
"0.57449347",
"0.5722406",
"0.56899935",
"0.5646143",
"0.5622566",
"0.56088024",
"0.5546671",
"0.5531829",
"0.55188143",
"0.5500924",
"0.54891515",
"0.54876214",
"0.54754686",
"0.5445157",
"0.5432976",
"0.5423305",
"0.5409891",
"0.5398439",
"0.5384772",
"0.53590924",
"0.53415763",
"0.5340667",
"0.5339759",
"0.53348225"
]
| 0.675943 | 1 |
Export samplesheets for Bioanalyzer machine. | def export_bioanalyzer(args):
clarity_epp.export.bioanalyzer.samplesheet(lims, args.process_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generate_sample_sheet(self):\n pass",
"def export_tapestation(args):\n clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)",
"def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)",
"def export_sample_indications(args):\n clarity_epp.export.sample.sample_indications(\n lims, args.output_file, args.artifact_name, args.sequencing_run, args.sequencing_run_project\n )",
"def _cmd_export_bed(args):\n bed_tables = []\n for segfname in args.segments:\n segments = read_cna(segfname)\n # ENH: args.sample_sex as a comma-separated list\n is_sample_female = verify_sample_sex(\n segments, args.sample_sex, args.male_reference, args.diploid_parx_genome\n )\n if args.sample_id:\n label = args.sample_id\n elif args.label_genes:\n label = None\n else:\n label = segments.sample_id\n tbl = export.export_bed(\n segments,\n args.ploidy,\n args.male_reference,\n args.diploid_parx_genome,\n is_sample_female,\n label,\n args.show,\n )\n bed_tables.append(tbl)\n table = pd.concat(bed_tables)\n write_dataframe(args.output, table, header=False)",
"def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)",
"def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)",
"def main():\n\n gephyrin_df = gephyrin_pairwise()\n cav31_df = cav31_pairwise()\n synapsin_df = synapsin_pairwise()\n psd_df = psd95_pairwise()\n vglut1_df = vglut1_pairwise()\n\n\n sheet_name = 'Pairwise'\n fn = 'pairwise_comparisons.xlsx'\n df_list = [synapsin_df, vglut1_df, psd_df, gephyrin_df, cav31_df]\n aa.write_dfs_to_excel(df_list, sheet_name, fn)",
"def generate_figures_and_xls_all_strains(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):\n all_freqs = []\n # concatenate all pos and samples into one dataframe\n dframes = []\n for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)\n mer, calls = region2data[(ref, pos)]\n for c, s in zip(calls, samples): \n df = pd.DataFrame(c, columns=feature_names)\n df[\"Strain\"] = s\n df[\"chr_pos\"] = \"%s:%s\"%(ref, pos)\n dframes.append(df)\n # read all tsv files\n df = pd.concat(dframes).dropna().reset_index()\n chr_pos, strains = df[\"chr_pos\"].unique(), df[\"Strain\"].unique() \n # compare individual methods\n for clf, method in (\n (KMeans(n_clusters=2), \"KMeans\"), \n (KNeighborsClassifier(), \"KNN\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"GMM+eIF\"), \n (GaussianMixture(random_state=0, n_components=2), \"GMM\"), \n (AgglomerativeClustering(n_clusters=2), \"AggClust\"), \n #(OneClassSVM(), \"OCSVM\"), \n (IsolationForest(random_state=0), \"IF\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"eIF\"), \n (RandomForestClassifier(), \"RF\"), \n ):\n fname = method\n for i, cols_start in enumerate(cols_starts, 1):\n results = []\n feat_name = \"_\".join(cols_start)\n fname = \"%s.%s\"%(method, feat_name); print(fname)\n outfn = os.path.join(outdir, \"%s.%s\"%(fname, ext))\n # narrow down the features to only signal intensity & trace\n cols = list(filter(lambda n: n.startswith(cols_start), feature_names))#; print(cols) #, \"DT\"\n # compare all samples to 0%\n s0 = samples[0]\n for s in samples[3:]: \n with np.errstate(under='ignore'):\n if \"+\" in method:\n clf2_name = method.split(\"+\")[-1]\n results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], feat_name, \n OFFSET=0.5, clf2_name=clf2_name, clf2=clf)\n elif method in (\"KNN\", \"RF\"):\n results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, feat_name)\n else:\n results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, feat_name)\n \n # and store mod_freq predicted by various methods\n freqs = pd.DataFrame(results, columns=[\"chr_pos\", \"features\", \"mod_freq wt\", \"mod_freq strain\", \"strain\"])\n freqs[\"diff\"] = freqs.max(axis=1)-freqs.min(axis=1); freqs\n for name, pos in group2pos.items(): #((\"negative\", negatives), (\"pU\", pU_pos), (\"Nm\", Nm_pos)):\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"group\"] = name\n #freqs.to_csv(outfn, sep=\"\\t\"); freqs.head()\n freqs.to_excel(xls, fname, index=False)\n # plot differences between methods\n for group, pos in group2pos.items():\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"modification\"] = group\n #return freqs\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))#, sharey=\"all\")\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#f8786fff\", \"#7aae02ff\", \"#00bfc2ff\", \"#c67afeff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"pU\")], ax=ax1)\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#ed823aff\", \"#1c6ca9ff\", \"#35d1bbff\", \"#c978fdff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"Nm\")], ax=ax2)\n ax1.set_ylabel(\"Per-site stoichiometry\"); ax2.set_ylabel(\"\")\n ax1.get_legend().remove(); ax2.get_legend().remove()#ax1.legend([]); ax2.legend([])\n ax1.set_ylim(0, 1); ax2.set_ylim(0, 1); #ax2.set(aspect=1.7)\n ax1.set_title(\"pU modifications\"); ax2.set_title(\"Nm modifications\")\n 
fig.suptitle(fname)\n fig.savefig(outfn)\n plt.close() # clear axis\n freqs[\"name\"] = fname\n all_freqs.append(freqs)\n return all_freqs",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def multi_sheet(self):\n # Initialize #\n all_sheets = []\n # Loop #\n for name in self.handle.sheet_names:\n sheet = self.handle.parse(name)\n sheet.insert(0, \"nace\", name)\n all_sheets.append(sheet)\n # Write #\n df = pandas.concat(all_sheets)\n df.to_csv(str(self.dest), **self.kwargs)",
"def export_samfile(self):",
"def export_illumina(args):\n clarity_epp.export.illumina.update_samplesheet(lims, args.process_id, args.artifact_id, args.output_file)",
"def export_sample(sample):\n outfile = os.path.join(sample.work_directory, sample.sample_id + '_sample.json')\n # print pretty JSON: print(json.dumps(parser.reads,indent=4, cls=CustomEncoder))\n with open(outfile, 'w') as out:\n # out.write(json.dumps(sample, indent=4, cls=CustomEncoder))\n json.dump(sample, out, cls=CustomEncoder)",
"def make_custom_sample_sheet(input_sample_sheet,output_sample_sheet=None,\n lanes=None,fmt=None):\n # Load the sample sheet data\n sample_sheet = IlluminaData.SampleSheet(input_sample_sheet)\n # Determine the column names for this format\n if sample_sheet.format == 'CASAVA':\n sample_col = 'SampleID'\n project_col = 'SampleProject'\n elif sample_sheet.format == 'IEM':\n sample_col = 'Sample_ID'\n project_col = 'Sample_Project'\n else:\n raise Exception(\"Unknown sample sheet format: %s\" %\n sample_sheet.format)\n # Add project names if not supplied\n for line in sample_sheet:\n if not line[project_col]:\n line[project_col] = line[sample_col]\n # Fix other problems\n sample_sheet.fix_illegal_names()\n sample_sheet.fix_duplicated_names()\n # Select subset of lanes if requested\n if lanes is not None:\n logging.debug(\"Updating to include only specified lanes: %s\" %\n ','.join([str(l) for l in lanes]))\n i = 0\n while i < len(sample_sheet):\n line = sample_sheet[i]\n if line['Lane'] in lanes:\n logging.debug(\"Keeping %s\" % line)\n i += 1\n else:\n del(sample_sheet[i])\n # Write out new sample sheet\n if output_sample_sheet is not None:\n sample_sheet.write(output_sample_sheet,fmt=fmt)\n return sample_sheet",
"def writeKnowledgeAreaWorksheets(wb: xlsxwriter.Workbook) -> None:\n\n global knowledgeAreas\n\n for knowledgeArea in knowledgeAreas:\n ws = wb.add_worksheet()\n ws.name = knowledgeArea.getText()[0:31]\n writeWorksheet(ws, knowledgeArea)",
"def export_wells(self, w, title):\r\n self._check_out(title)\r\n np.savez_compressed(os.path.join(self.out_dir, title, title), w)",
"def export_classification(out_name, table, asset_root, region, years, export='asset'):\n fc = ee.FeatureCollection(table)\n roi = ee.FeatureCollection(region)\n mask = roi.geometry().bounds().getInfo()['coordinates']\n\n classifier = ee.Classifier.randomForest(\n numberOfTrees=100,\n variablesPerSplit=0,\n minLeafPopulation=1,\n outOfBagMode=False).setOutputMode('CLASSIFICATION')\n\n input_props = fc.first().propertyNames().remove('YEAR').remove('POINT_TYPE').remove('system:index')\n\n trained_model = classifier.train(fc, 'POINT_TYPE', input_props)\n\n for yr in years:\n input_bands = stack_bands(yr, roi)\n annual_stack = input_bands.select(input_props)\n classified_img = annual_stack.classify(trained_model).int().set({\n 'system:index': ee.Date('{}-01-01'.format(yr)).format('YYYYMMdd'),\n 'system:time_start': ee.Date('{}-01-01'.format(yr)).millis(),\n 'system:time_end': ee.Date('{}-12-31'.format(yr)).millis(),\n 'image_name': out_name,\n 'class_key': '0: irrigated, 1: rainfed, 2: uncultivated, 3: wetland'})\n\n if export == 'asset':\n task = ee.batch.Export.image.toAsset(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n assetId=os.path.join(asset_root, '{}_{}'.format(out_name, yr)),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n\n elif export == 'cloud':\n task = ee.batch.Export.image.toCloudStorage(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(yr, out_name),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n else:\n raise NotImplementedError('choose asset or cloud for export')\n\n task.start()\n print(os.path.join(asset_root, '{}_{}'.format(out_name, yr)))",
"def write_clinical_samples_tsv(sheets):\n\n # Header lines, first item must start with #\n # attribute Display Names\n NAMES = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Descriptions\n DESC = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Datatype\n DATATYPE = [\"#STRING\", \"STRING\"]\n # attribute Priority\n PRIORITY = [\"#1\", \"1\"]\n # attribute columns\n COLUMNS = [\"PATIENT_ID\", \"SAMPLE_ID\"]\n\n with open(snakemake.output.samples_tsv, \"w\") as tsvfile:\n writer = csv.writer(tsvfile, delimiter=\"\\t\")\n # write header\n writer.writerow(NAMES)\n writer.writerow(DESC)\n writer.writerow(DATATYPE)\n writer.writerow(PRIORITY)\n writer.writerow(COLUMNS)\n\n for sheet in sheets:\n for p in sheet.bio_entities.values():\n for s in p.bio_samples.values():\n if s.extra_infos[\"isTumor\"]:\n writer.writerow([p.name, s.name])",
"def generate_figures_and_xls(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):\n all_freqs = []\n # concatenate all pos and samples into one dataframe\n dframes = []\n for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)\n mer, calls = region2data[(ref, pos)]\n for c, s in zip(calls, samples): \n df = pd.DataFrame(c, columns=feature_names)\n df[\"Strain\"] = s\n df[\"chr_pos\"] = \"%s:%s\"%(ref, pos)\n dframes.append(df)\n # read all tsv files\n df = pd.concat(dframes).dropna().reset_index()\n chr_pos, strains = df[\"chr_pos\"].unique(), df[\"Strain\"].unique() \n \n # compare individual methods\n for clf, method in (\n (iso_new.iForest(ntrees=100, random_state=0), \"GMM+eIF\"), \n (GaussianMixture(random_state=0, n_components=2), \"GMM\"), \n (AgglomerativeClustering(n_clusters=2), \"AggClust\"), \n (KMeans(n_clusters=2), \"KMeans\"), \n (OneClassSVM(), \"OCSVM\"), \n (IsolationForest(random_state=0), \"IF\"), \n (iso_new.iForest(ntrees=100, random_state=0), \"eIF\"), \n (KNeighborsClassifier(), \"KNN\"), \n (RandomForestClassifier(), \"RF\"), \n ):\n fname = method\n print(fname)\n outfn = os.path.join(outdir, \"%s.%s\"%(fname, ext)) \n results = []\n for i, cols_start in enumerate(cols_starts, 1):\n # narrow down the features to only signal intensity & trace\n cols = list(filter(lambda n: n.startswith(cols_start), feature_names)); cols #, \"DT\"\n # compare all samples to 0%\n s0 = samples[0]\n for s in samples[3:]: \n with np.errstate(under='ignore'):\n if \"+\" in method:\n clf2_name = method.split(\"+\")[-1]\n results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], \"_\".join(cols_start), \n OFFSET=0.5, clf2_name=clf2_name, clf2=clf)\n elif method in (\"KNN\", \"RF\"):\n results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, \"_\".join(cols_start))\n else:\n results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, \"_\".join(cols_start))\n \n # and store mod_freq predicted by various methods\n freqs = pd.DataFrame(results, columns=[\"chr_pos\", \"features\", \"mod_freq wt\", \"mod_freq strain\", \"strain\"])\n freqs[\"diff\"] = freqs.max(axis=1)-freqs.min(axis=1); freqs\n for name, pos in group2pos.items(): #((\"negative\", negatives), (\"pU\", pU_pos), (\"Nm\", Nm_pos)):\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"group\"] = name\n #freqs.to_csv(outfn, sep=\"\\t\"); freqs.head()\n freqs.to_excel(xls, fname, index=False)\n # plot differences between methods\n for group, pos in group2pos.items():\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"modification\"] = group\n #g = sns.catplot(x=\"strain\", y=\"diff\", hue=\"features\", col=\"modification\", data=freqs, kind=\"box\")#, palette=\"Blues\")\n g = sns.catplot(x=\"strain\", y=\"diff\", hue=\"features\", col=\"modification\", data=freqs, kind=\"point\", ci=None)#, palette=\"Blues\")\n fig = g.fig\n fig.suptitle(method)\n for ax in fig.axes:\n ax.set_xlabel(\"Expected mod_freq\")\n ax.set_ylabel(\"Observed mod_freq [absolute difference between wt & mt]\")\n ax.set_ylim(0, 1)\n fig.savefig(outfn)\n plt.close() # clear axis\n freqs[\"name\"] = fname\n all_freqs.append(freqs)\n return all_freqs",
"def start_output(self, output_lista_cegiel):\r\n self.create_output_file()\r\n\r\n for elem in range(len(output_lista_cegiel)):\r\n self.output_file.create_sheet(output_lista_cegiel[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def write_shards(\n voxceleb_folder_path: pathlib.Path,\n shards_path: pathlib.Path,\n compress_in_place: bool,\n shard_name_pattern: str = \"shard-{idx:06d}\",\n samples_per_shard: int = 5000,\n sequential_same_speaker_samples: int = 4,\n min_unique_speakers_per_shard: int = 32,\n ensure_all_data_in_shards: bool = False,\n discard_partial_shards: bool = True,\n):\n # make sure output folder exist\n shards_path.mkdir(parents=True, exist_ok=True)\n\n # find all audio files\n audio_files = sorted([f for f in voxceleb_folder_path.rglob(\"*.wav\")])\n\n # create data dictionary {speaker id: List[file_path, sample_key]}}\n data: Dict[str, List[Tuple[str, str, pathlib.Path]]] = defaultdict(list)\n\n # track statistics on data\n all_speaker_ids = set()\n all_youtube_ids = set()\n all_sample_ids = set()\n youtube_id_per_speaker = defaultdict(list)\n sample_keys_per_speaker = defaultdict(list)\n num_samples = 0\n all_keys = set()\n\n for f in audio_files:\n # path should be\n # ${voxceleb_folder_path}/wav/speaker_id/youtube_id/utterance_id.wav\n speaker_id = f.parent.parent.name\n youtube_id = f.parent.name\n utterance_id = f.stem\n\n # create a unique key for this sample\n key = f\"{speaker_id}{ID_SEPARATOR}{youtube_id}{ID_SEPARATOR}{utterance_id}\"\n\n if key in all_keys:\n raise ValueError(\"found sample with duplicate key\")\n else:\n all_keys.add(key)\n\n # store statistics\n num_samples += 1\n\n all_speaker_ids.add(speaker_id)\n all_youtube_ids.add(youtube_id)\n all_sample_ids.add(key)\n\n youtube_id_per_speaker[speaker_id].append(youtube_id)\n sample_keys_per_speaker[speaker_id].append(key)\n\n # store data in dict\n data[speaker_id].append((key, speaker_id, f))\n\n # randomly shuffle the list of all samples for each speaker\n for speaker_id in data.keys():\n random.shuffle(data[speaker_id])\n\n # determine a specific speaker_id label for each speaker_id\n speaker_id_to_idx = {\n speaker_id: idx for idx, speaker_id in enumerate(sorted(all_speaker_ids))\n }\n\n # write a meta.json file which contains statistics on the data\n # which will be written to shards\n all_speaker_ids = list(all_speaker_ids)\n all_youtube_ids = list(all_youtube_ids)\n all_sample_ids = list(all_sample_ids)\n\n meta_dict = {\n \"speaker_ids\": all_speaker_ids,\n \"youtube_ids\": all_youtube_ids,\n \"sample_ids\": all_sample_ids,\n \"speaker_id_to_idx\": speaker_id_to_idx,\n \"youtube_ids_per_speaker\": youtube_id_per_speaker,\n \"sample_ids_per_speaker\": sample_keys_per_speaker,\n \"num_samples\": num_samples,\n \"num_speakers\": len(all_speaker_ids),\n }\n\n with (shards_path / \"meta.json\").open(\"w\") as f:\n json.dump(meta_dict, f)\n\n # split the data into shards such that each shard has at most\n # `samples_per_shard` samples and that the sequential order in the\n # shard is:\n # 1 = sample of speaker id `i`\n # ...\n # sequential_same_speaker_samples =sample of speaker id `i`\n # sequential_same_speaker_samples + 1 = sample of speaker id `j`\n # etc\n shards_list = []\n\n def samples_left():\n num_samples_left = sum(len(v) for v in data.values())\n num_valid_speakers = sum(\n len(v) >= sequential_same_speaker_samples for v in data.values()\n )\n\n # a shard should contain at least 2 different speakers\n if num_valid_speakers >= 2 or ensure_all_data_in_shards:\n return num_samples_left\n else:\n return 0\n\n def valid_speakers(n: int, previous_id: Optional[str] = None):\n return [k for k in data.keys() if len(data[k]) >= n and k != previous_id]\n\n def pop_n_samples(\n n: int, current_speakers_in_shard: 
Set[str], previous_id: Optional[str] = None\n ):\n valid_speaker_ids = valid_speakers(n, previous_id)\n\n if len(current_speakers_in_shard) < min_unique_speakers_per_shard:\n valid_speaker_ids = [\n sid for sid in valid_speaker_ids if sid not in current_speakers_in_shard\n ]\n\n if len(valid_speaker_ids) == 0:\n raise ValueError(\n f\"shard cannot be guaranteed to have {min_unique_speakers_per_shard=}\"\n )\n\n samples_per_speaker = [len(data[k]) for k in valid_speaker_ids]\n random_speaker_id = random.choices(valid_speaker_ids, samples_per_speaker)[0]\n current_speakers_in_shard.add(random_speaker_id)\n popped_samples = []\n\n for _ in range(n):\n sample_list = data[random_speaker_id]\n popped_samples.append(\n sample_list.pop(random.randint(0, len(sample_list) - 1))\n )\n\n return popped_samples, random_speaker_id, current_speakers_in_shard\n\n # write shards\n while samples_left() > 0:\n shard = []\n speakers_in_shard = set()\n previous = None\n\n print(\n f\"determined shards={len(shards_list):>4}\\t\"\n f\"samples left={samples_left():>9,d}\\t\"\n f\"speakers left=\"\n f\"{len(valid_speakers(sequential_same_speaker_samples, previous)):>4,d}\"\n )\n while len(shard) < samples_per_shard and samples_left() > 0:\n samples, previous, speakers_in_shard = pop_n_samples(\n n=sequential_same_speaker_samples,\n current_speakers_in_shard=speakers_in_shard,\n previous_id=previous,\n )\n for key, speaker_id, f in samples:\n shard.append((key, speaker_id_to_idx[speaker_id], f))\n\n shards_list.append(shard)\n\n # assert all data is in a shard\n if ensure_all_data_in_shards:\n assert sum(len(v) for v in data.values()) == 0\n\n # remove any shard which does share the majority amount of samples\n if discard_partial_shards:\n unique_len_count = defaultdict(int)\n for lst in shards_list:\n unique_len_count[len(lst)] += 1\n\n if len(unique_len_count) > 2:\n raise ValueError(\"expected at most 2 unique lengths\")\n\n if len(unique_len_count) == 0:\n raise ValueError(\"expected at least 1 unique length\")\n\n majority_len = -1\n majority_count = -1\n for unique_len, count in unique_len_count.items():\n if count > majority_count:\n majority_len = unique_len\n majority_count = count\n\n shards_list = [lst for lst in shards_list if len(lst) == majority_len]\n\n # write shards\n shards_path.mkdir(exist_ok=True, parents=True)\n\n # seems like disk write speed only allows for 1 process anyway :/\n with multiprocessing.Pool(processes=1) as p:\n for idx, shard_content in enumerate(shards_list):\n args = {\n \"shard_name\": shard_name_pattern.format(idx=idx),\n \"shards_path\": shards_path,\n \"data_tpl\": shard_content,\n \"compress\": compress_in_place,\n }\n p.apply_async(\n _write_shard,\n kwds=args,\n error_callback=lambda x: print(\n f\"error in apply_async ``_write_shard!\\n{x}\"\n ),\n )\n\n p.close()\n p.join()",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()",
"def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])",
"def convert_testing_data(mfccPath):\n inputlist, inputnamelist = ark_parser(mfccPath, 'test.ark')\n\n print(\"%d sample in testing set\" % len(inputlist))\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n \n with open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamelist, test_name)",
"def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")",
"def export(self):\n # check that session state is exportable\n if self.general_parser is None or \\\n self.specific_parser is None or \\\n ((self.audio_parser is None) and (self.audio_data_parser is None)) or \\\n ((self.video_parser is None) and (self.video_data_parser is None)) or \\\n self.month_selected is None or \\\n self.unique_audio_found is False or \\\n self.unique_video_found is False:\n\n self.cant_export_label.grid(row=13, column=6, columnspan=1, rowspan=5)\n raise Exception(\"you need to load the general, \"\n \"month-specific, \"\n \"audio and video words first\")\n\n self.cant_export_label.grid_remove()\n export_file = tkFileDialog.asksaveasfilename() # ask for output file\n\n unique_words = self.top_unique_audio + self.top_unique_video # concatenate\n unique_words = sorted(unique_words, key=self.get_count_from_rank, reverse=True) # sort\n\n with open(export_file, \"w\") as file:\n file.write(\"\\\"rank\\\" \\\"source\\\" \\\"word\\\" \\\"in_general\\\" \\\"count\\\"\\n\\n\") # print header\n\n for rank in self.top_unique_audio:\n\n for entry in rank:\n\n file.write(str(entry.rank) + \" audio \" +\n entry.word + \" \" +\n str(entry.in_general) + \" \" +\n str(entry.count) + \"\\n\")\n\n for rank in self.top_unique_video:\n\n for entry in rank:\n\n file.write(str(entry.rank) + \" video \" +\n entry.word + \" \" +\n str(entry.in_general) + \" \" +\n str(entry.count) + \"\\n\")"
]
| [
"0.65062565",
"0.63379425",
"0.60326344",
"0.60011256",
"0.5988671",
"0.59117097",
"0.5885265",
"0.5811275",
"0.5783409",
"0.5748259",
"0.56417507",
"0.56232613",
"0.5588807",
"0.55753165",
"0.5526767",
"0.5514415",
"0.5438754",
"0.5429626",
"0.54096013",
"0.53891826",
"0.5375554",
"0.5365905",
"0.5344768",
"0.53366446",
"0.52791643",
"0.5270642",
"0.52630275",
"0.5253562",
"0.52461916",
"0.5227679"
]
| 0.7096843 | 0 |
Export samplesheets for caliper machine. | def export_caliper(args):
    if args.type == 'normalise':
        clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)
    elif args.type == 'dilute':
        clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generate_sample_sheet(self):\n pass",
"def export_tapestation(args):\n clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def main():\n\n gephyrin_df = gephyrin_pairwise()\n cav31_df = cav31_pairwise()\n synapsin_df = synapsin_pairwise()\n psd_df = psd95_pairwise()\n vglut1_df = vglut1_pairwise()\n\n\n sheet_name = 'Pairwise'\n fn = 'pairwise_comparisons.xlsx'\n df_list = [synapsin_df, vglut1_df, psd_df, gephyrin_df, cav31_df]\n aa.write_dfs_to_excel(df_list, sheet_name, fn)",
"def multi_sheet(self):\n # Initialize #\n all_sheets = []\n # Loop #\n for name in self.handle.sheet_names:\n sheet = self.handle.parse(name)\n sheet.insert(0, \"nace\", name)\n all_sheets.append(sheet)\n # Write #\n df = pandas.concat(all_sheets)\n df.to_csv(str(self.dest), **self.kwargs)",
"def main():\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n # Call the Sheets API\n SPREADSHEET_ID = '1whfqnqc3TM8ui4hjLqCQq9ZVN5kMuTQrRodXvFreZxM'\n result = service.spreadsheets().get(spreadsheetId = SPREADSHEET_ID).execute()\n spreadsheetUrl = result['spreadsheetUrl']\n\n exportUrl = re.sub(\"\\/edit$\", '/export', spreadsheetUrl)\n headers = { 'Authorization': 'Bearer ' + creds.access_token }\n params = { 'format': 'csv',\n 'gid': 0 } \n queryParams = urllib.urlencode(params)\n url = exportUrl + '?' + queryParams\n response = requests.get(url, headers = headers)\n with open(sys.argv[1], 'wb') as csvFile:\n csvFile.write(response.content)",
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)",
"def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def make_custom_sample_sheet(input_sample_sheet,output_sample_sheet=None,\n lanes=None,fmt=None):\n # Load the sample sheet data\n sample_sheet = IlluminaData.SampleSheet(input_sample_sheet)\n # Determine the column names for this format\n if sample_sheet.format == 'CASAVA':\n sample_col = 'SampleID'\n project_col = 'SampleProject'\n elif sample_sheet.format == 'IEM':\n sample_col = 'Sample_ID'\n project_col = 'Sample_Project'\n else:\n raise Exception(\"Unknown sample sheet format: %s\" %\n sample_sheet.format)\n # Add project names if not supplied\n for line in sample_sheet:\n if not line[project_col]:\n line[project_col] = line[sample_col]\n # Fix other problems\n sample_sheet.fix_illegal_names()\n sample_sheet.fix_duplicated_names()\n # Select subset of lanes if requested\n if lanes is not None:\n logging.debug(\"Updating to include only specified lanes: %s\" %\n ','.join([str(l) for l in lanes]))\n i = 0\n while i < len(sample_sheet):\n line = sample_sheet[i]\n if line['Lane'] in lanes:\n logging.debug(\"Keeping %s\" % line)\n i += 1\n else:\n del(sample_sheet[i])\n # Write out new sample sheet\n if output_sample_sheet is not None:\n sample_sheet.write(output_sample_sheet,fmt=fmt)\n return sample_sheet",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def main():\n\n store = file.Storage('token.json')\n creds = store.get() # set to None, for re-authentication\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n cards = []\n i = 0\n with open(\"The landing cards.txt\") as f:\n for l, llint in enumerate(f):\n for line in llint.split('\\n\\n'):\n if line == '':\n continue\n if '________________' in line:\n if i % 2 == 0:\n cards.append([''])\n else:\n cards[-1].append('')\n i += 1\n cards[-1][-1] += line.replace('________________', '')\n\n\n\n write('Templates!A1:B100', cards, service)",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response",
"def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, 
overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we 
know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, 
host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()",
"def shred_sheets(subdomain, audit_date, input_file, _format):\r\n name = extract_dir_name(input_file)\r\n fname = PurePath(input_file).name.__str__()\r\n try:\r\n os.makedirs(name)\r\n except:\r\n pass\r\n\r\n wb = pd.ExcelFile(input_file)\r\n for ws in wb.sheet_names:\r\n data = pd.read_excel(input_file, sheet_name=ws)\r\n # add constants\r\n data.index.names = ['ix']\r\n data['subdomin'] = subdomain\r\n data['audit_date'] = audit_date\r\n\r\n # strip chars we don't want in colum names\r\n cols = data.columns\r\n renamed = []\r\n for col in cols:\r\n col = re.sub('[^a-zA-Z0-9]', '', col)\r\n renamed.append(col)\r\n\r\n data.columns = renamed\r\n\r\n # build output formats\r\n if _format == 'mongo':\r\n client = MongoClient('mongodb://localhost:27017/')\r\n db = client.Sitebulb\r\n cl = db.August5\r\n\r\n try:\r\n cl.insert_many(data.to_dict('records'))\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'json' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.json')\r\n data.to_json(new_file, orient=\"records\")\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'csv' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\r\n data.to_csv(new_file)\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue",
"def dfs_tabs(df_list, sheet_list, file_name):\n\n writer = pd.ExcelWriter(file_name,engine='xlsxwriter') \n for dataframe, sheet in zip(df_list, sheet_list):\n dataframe.to_excel(writer, sheet_name=sheet, startrow=0 , startcol=0, index=False) \n writer.save()",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")",
"def start_output(self, output_lista_cegiel):\r\n self.create_output_file()\r\n\r\n for elem in range(len(output_lista_cegiel)):\r\n self.output_file.create_sheet(output_lista_cegiel[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def fill_export_section():\n section = _SectionData(\"Export\")\n section.props.append((\"ExportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_scale)))\n section.props.append((\"ApplyModifiers\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_apply_modifiers))))\n section.props.append((\"ExcludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_exclude_edgesplit))))\n section.props.append((\"IncludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_include_edgesplit))))\n section.props.append((\"ActiveUVOnly\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_active_uv_only))))\n section.props.append((\"ExportVertexGroups\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_groups))))\n section.props.append((\"ExportVertexColor\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color))))\n section.props.append((\"ExportVertexColorType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type)))\n section.props.append((\"ExportVertexColorType7\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type_7)))\n # section.props.append((\"ExportAnimFile\", info.get_default_prop_value(bpy.types.GlobalSCSProps.export_anim_file)))\n section.props.append((\"ExportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pim_file))))\n section.props.append((\"OutputType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_output_type)))\n section.props.append((\"ExportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pit_file))))\n section.props.append((\"ExportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pic_file))))\n section.props.append((\"ExportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pip_file))))\n section.props.append((\"SignExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_write_signature))))\n return section",
"def genOffice(self, out, selProjects, measures):\n projects = [] \n for project in self.projects.values():\n if selProjects == \"\" or project.name in selProjects:\n projects.append(project)\n benchmarkMerge = self.merge(projects)\n \n sheet = Spreadsheet(benchmarkMerge, measures)\n for project in projects:\n for runspec in project:\n sheet.addRunspec(runspec)\n sheet.finish()\n sheet.printSheet(out)",
"def request_validation_extract(file_prefix='validation'):\n roi = ee.FeatureCollection(GEO_DOMAIN)\n plots = ee.FeatureCollection(None).filterBounds(roi)\n image_list = list_assets('users/dgketchum/IrrMapper/version_2')\n\n for yr in YEARS:\n yr_img = [x for x in image_list if x.endswith(str(yr))]\n coll = ee.ImageCollection(yr_img)\n classified = coll.mosaic().select('classification')\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = classified.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR', 'FID'],\n scale=30)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()",
"def export(self):\n rpt_date = datetime.now()\n filename = 'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def output_to_spreadsheet(routers_switches, phones, aps, others, failed_devices, file_location):\n # Creates Excel workbook and worksheets\n wb = Workbook()\n routers_switches_ws = wb.active\n routers_switches_ws.title = 'Routers_Switches'\n phones_ws = wb.create_sheet('Phones')\n aps_ws = wb.create_sheet('APs')\n others_ws = wb.create_sheet('Others')\n failed_ws = wb.create_sheet('Failed')\n\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n # Checks if phones contain directory number and description from CUCM export merge\n if any('description' in phone for phone in phones):\n phone_string = 'CUCMPhone'\n else:\n phone_string = 'Phone'\n\n neighbor_count = 1\n # Sets 'neighbor_count' to length of longest neighbor list in routers_switches dictionaries\n for rt_sw in routers_switches:\n if rt_sw['connection_attempt'] == 'Failed':\n if len(rt_sw['neighbors']) > neighbor_count:\n neighbor_count = len(rt_sw['neighbors'])\n\n def write_header(worksheet, device_type):\n \"\"\"\n :param device_type: 'RouterSwitch', 'Phone', 'CUCMPhone', 'WAP', 'Other', or 'Failed'\n :param worksheet: Device worksheet\n :return: int(header_length), list(header)\n \"\"\"\n header = ['Hostname', 'IP Address', 'Model', 'Software Version']\n if device_type == 'RouterSwitch':\n header += ['Serial', 'Connection Type', 'ROMMON', 'Connection Attempt', 'Discovery Status']\n for n in range(1, neighbor_count + 1):\n header += [f'Neighbor {n} Hostname', f'Neighbor {n} IP Address', f'Local Interface to Neighbor {n}',\n f'Neighbor {n} Interface']\n elif device_type == 'Phone' or device_type == 'CUCMPhone':\n header += ['Voice VLAN', 'MAC Address', 'Switch Hostname', 'Switch IP Address', 'Switchport']\n if device_type == 'CUCMPhone':\n header += ['Description', 'Main Directory Number']\n elif device_type == 'WAP':\n header += ['Switch Hostname', 'Switch IP Address', 'Switchport']\n elif device_type == 'Other':\n header += ['Neighbor Hostname', 'Neighbor IP Address', 'Local Interface to Neighbor', 'Neighbor Interface']\n elif device_type == 'Failed':\n header = ['IP Address', 'Connection Type', 'Device Type', 'Connectivity', 'Authentication',\n 'Authorization', 'Discovery Status', 'Connection Exception']\n worksheet.append(header)\n return len(header), header\n\n def write_to_sheet(device_list, worksheet, device_type):\n \"\"\"\n :param device_type: 'RouterSwitch', 'Phone', 'CUCMPhone', 'WAP', 'Other', or 'Failed'\n :param device_list: List of devices\n :param worksheet: Device worksheet\n :return: list(rows)\n \"\"\"\n rows = []\n for device in device_list:\n if device_type != 'Failed':\n row = [device['hostname'], device['ip_address'], device['model'], device['software_version']]\n if device_type == 'RouterSwitch':\n if 'serial' in device:\n serial = device['serial']\n connection_type = device['connection_type']\n rommon = device['rommon']\n else:\n serial = 'Unknown'\n connection_type = 'Unknown'\n rommon = 'Unknown'\n row += [serial, connection_type, rommon, device['connection_attempt'], device['discovery_status']]\n if device['connection_attempt'] == 'Failed':\n for neighbor in device['neighbors']:\n row += [neighbor['hostname'], neighbor['ip_address'], neighbor['local_intf'],\n neighbor['remote_intf']]\n if device_type == 'Phone' or device_type == 'CUCMPhone':\n neighbor = device['neighbor']\n row += [device['voice_vlan'], device['mac_addr'], neighbor['hostname'], neighbor['ip_address'],\n neighbor['remote_intf']]\n if 'description' in device:\n row += [device['description'], device['directory_number']]\n if device_type == 
'WAP' or device_type == 'Other':\n neighbor = device['neighbor']\n row += [neighbor['hostname'], neighbor['ip_address'], neighbor['remote_intf']]\n if device_type == 'Other':\n row.append(neighbor['local_intf'])\n else:\n row = [device['ip_address'], device['connection_type'], device['device_type'], device['connectivity'],\n device['authentication'], device['authorization'], device['discovery_status'],\n device['exception']]\n worksheet.append(row)\n rows.append(row)\n return rows\n\n def complete_sheet(device_list, worksheet, device_type):\n \"\"\"Completes workbook sheet\"\"\"\n column_num = len(device_list) + 1\n header_out = write_header(worksheet, device_type)\n header = header_out[1]\n header_length = header_out[0]\n letter = header_length - 1\n if letter > 25:\n column_letter = f'{alphabet[int(letter / 26) - 1]}{alphabet[letter % 26]}'\n else:\n column_letter = alphabet[letter]\n bottom_right_cell = f'{column_letter}{column_num}'\n rows = write_to_sheet(device_list, worksheet, device_type)\n\n # Creates table if there is data in table\n if len(device_list) != 0:\n table = Table(displayName=device_type, ref=f'A1:{bottom_right_cell}')\n style = TableStyleInfo(name='TableStyleMedium9', showFirstColumn=False, showLastColumn=False,\n showRowStripes=True, showColumnStripes=True)\n table.tableStyleInfo = style\n worksheet.add_table(table)\n\n # Sets column widths\n all_data = [header]\n all_data += rows\n column_widths = []\n for row in all_data:\n for i, cell in enumerate(row):\n if len(column_widths) > i:\n if len(str(cell)) > column_widths[i]:\n column_widths[i] = len(str(cell))\n else:\n column_widths += [len(str(cell))]\n\n for i, column_width in enumerate(column_widths):\n if i > 25:\n l1 = f'{alphabet[int(i / 26) - 1]}{alphabet[i % 26]}'\n else:\n l1 = alphabet[i]\n worksheet.column_dimensions[l1].width = column_width + 3\n\n complete_sheet(routers_switches, routers_switches_ws, 'RouterSwitch')\n complete_sheet(phones, phones_ws, phone_string)\n complete_sheet(aps, aps_ws, 'WAP')\n complete_sheet(others, others_ws, 'Other')\n complete_sheet(failed_devices, failed_ws, 'Failed')\n\n # Saves workbook\n date_time = datetime.now().strftime('%m_%d_%Y-%H_%M_%S')\n wb.save(f'{file_location}/network_inventory-{date_time}-.xlsx')"
]
| [
"0.6811611",
"0.6408173",
"0.62680244",
"0.59886086",
"0.57728636",
"0.5712114",
"0.56356823",
"0.5596462",
"0.55854857",
"0.55714446",
"0.5538315",
"0.5358879",
"0.5353194",
"0.5345031",
"0.5332525",
"0.53291404",
"0.5324864",
"0.5318297",
"0.53180987",
"0.5310183",
"0.5309682",
"0.5300614",
"0.52987367",
"0.529738",
"0.5244448",
"0.5241301",
"0.5235755",
"0.5223368",
"0.5216052",
"0.51970583"
]
| 0.6603783 | 1 |
Export samplesheets for hamilton machine. | def export_hamilton(args):
    if args.type == 'filling_out':
        clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)
    elif args.type == 'purify':
        clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_tapestation(args):\n clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)",
"def test_generate_sample_sheet(self):\n pass",
"def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def export_wells(self, w, title):\r\n self._check_out(title)\r\n np.savez_compressed(os.path.join(self.out_dir, title, title), w)",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")",
"def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])",
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def main():\n\n gephyrin_df = gephyrin_pairwise()\n cav31_df = cav31_pairwise()\n synapsin_df = synapsin_pairwise()\n psd_df = psd95_pairwise()\n vglut1_df = vglut1_pairwise()\n\n\n sheet_name = 'Pairwise'\n fn = 'pairwise_comparisons.xlsx'\n df_list = [synapsin_df, vglut1_df, psd_df, gephyrin_df, cav31_df]\n aa.write_dfs_to_excel(df_list, sheet_name, fn)",
"def writeKnowledgeAreaWorksheets(wb: xlsxwriter.Workbook) -> None:\n\n global knowledgeAreas\n\n for knowledgeArea in knowledgeAreas:\n ws = wb.add_worksheet()\n ws.name = knowledgeArea.getText()[0:31]\n writeWorksheet(ws, knowledgeArea)",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()",
"def multi_sheet(self):\n # Initialize #\n all_sheets = []\n # Loop #\n for name in self.handle.sheet_names:\n sheet = self.handle.parse(name)\n sheet.insert(0, \"nace\", name)\n all_sheets.append(sheet)\n # Write #\n df = pandas.concat(all_sheets)\n df.to_csv(str(self.dest), **self.kwargs)",
"def export_bioanalyzer(args):\n clarity_epp.export.bioanalyzer.samplesheet(lims, args.process_id, args.output_file)",
"def to_xlsx(self, filename):\n # create path if it does not exist\n suffix = filename.split(\".\")[-1]\n if not suffix == \"xlsx\":\n filename = filename + \".xlsx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n writer = pd.ExcelWriter(filename)\n for name, df in sorted(self.input_data.items()):\n df.to_excel(writer, name)\n writer.save()\n logging.info(\"Scenario saved as excel file to %s\", filename)",
"def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)",
"def dfs_tabs(df_list, sheet_list, file_name):\n\n writer = pd.ExcelWriter(file_name,engine='xlsxwriter') \n for dataframe, sheet in zip(df_list, sheet_list):\n dataframe.to_excel(writer, sheet_name=sheet, startrow=0 , startcol=0, index=False) \n writer.save()",
"def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)",
"def write_clinical_samples_tsv(sheets):\n\n # Header lines, first item must start with #\n # attribute Display Names\n NAMES = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Descriptions\n DESC = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Datatype\n DATATYPE = [\"#STRING\", \"STRING\"]\n # attribute Priority\n PRIORITY = [\"#1\", \"1\"]\n # attribute columns\n COLUMNS = [\"PATIENT_ID\", \"SAMPLE_ID\"]\n\n with open(snakemake.output.samples_tsv, \"w\") as tsvfile:\n writer = csv.writer(tsvfile, delimiter=\"\\t\")\n # write header\n writer.writerow(NAMES)\n writer.writerow(DESC)\n writer.writerow(DATATYPE)\n writer.writerow(PRIORITY)\n writer.writerow(COLUMNS)\n\n for sheet in sheets:\n for p in sheet.bio_entities.values():\n for s in p.bio_samples.values():\n if s.extra_infos[\"isTumor\"]:\n writer.writerow([p.name, s.name])",
"def start_output(self, output_lista_cegiel):\r\n self.create_output_file()\r\n\r\n for elem in range(len(output_lista_cegiel)):\r\n self.output_file.create_sheet(output_lista_cegiel[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)",
"def writeResult(outfilename, blocks, data_per_group):\n allfile = open(\"\".join([\"res_\", outfilename, \".xls\"]), \"w\")\n meandevfile = open(\"\".join([\"res_\", outfilename, \"_meandev.xls\"]), \"w\")\n\n # write titles\n i = 0\n for letter in range(len(blocks[0])/12):\n for number in range(12):\n i += 1\n allfile.write(\"\".join([chr(ord('A') + letter), str(number+1), '\\t']))\n if (i % data_per_group == 0):\n allfile.write(\"\\t\")\n if ((i-1) % data_per_group == 0):\n meandevfile.write(\"\".join([chr(ord('A') + letter), str(number+1), '\\t']))\n else:\n meandevfile.write(\"\\t\")\n allfile.write(\"\\n\")\n meandevfile.write(\"\\n\")\n\n\n for block in blocks:\n i = 0\n thl = []\n for n in block:\n thl.append(n)\n allfile.write(\"%f\\t\" % n)\n i += 1\n if (i % data_per_group == 0):\n allfile.write(\"\\t\")\n m, d = meandev(thl)\n meandevfile.write(\"%f\\t\" % m)\n meandevfile.write(\"%f\" % d)\n meandevfile.write(\"\\t\\t\")\n thl = []\n allfile.write(\"\\n\")\n meandevfile.write(\"\\n\")",
"def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb",
"def _cmd_export_bed(args):\n bed_tables = []\n for segfname in args.segments:\n segments = read_cna(segfname)\n # ENH: args.sample_sex as a comma-separated list\n is_sample_female = verify_sample_sex(\n segments, args.sample_sex, args.male_reference, args.diploid_parx_genome\n )\n if args.sample_id:\n label = args.sample_id\n elif args.label_genes:\n label = None\n else:\n label = segments.sample_id\n tbl = export.export_bed(\n segments,\n args.ploidy,\n args.male_reference,\n args.diploid_parx_genome,\n is_sample_female,\n label,\n args.show,\n )\n bed_tables.append(tbl)\n table = pd.concat(bed_tables)\n write_dataframe(args.output, table, header=False)",
"def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, 
overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we 
know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, 
host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()",
"def test_export(filename, folder, space_type):\n grid = bempp.api.shapes.cube(h=0.5)\n space = bempp.api.function_space(grid, *space_type)\n function = bempp.api.GridFunction(\n space, coefficients=np.random.rand(space.global_dof_count)\n )\n bempp.api.export(os.path.join(folder, filename), grid_function=function)",
"def export_samfile(self):",
"def write_one_sheet(self, key):\n # Get sheet #\n sheet = self.writer.sheets[key]\n # Get dataframes #\n all_dfs = self.sheet_to_dfs[key]\n # Initialize #\n row = 0\n # Loop #\n for info in all_dfs:\n # Get dataframe #\n df = info['dataframe']\n # Write custom title #\n sheet.write_string(row, 0, info.get('title', ''))\n row += 2\n # Add extras #\n df.index.name = info.get('y_extra', '')\n df.columns.name = info.get('x_extra', '')\n # Add Y labels #\n title, label = info.get('y_title', ''), info.get('y_label', '')\n df = pandas.concat({title: df}, names=[label])\n # Add X labels #\n title, label = info.get('x_title', ''), info.get('x_label', '')\n df = pandas.concat({title: df}, names=[label], axis=1)\n # Write dataframe #\n df.to_excel(self.writer,\n sheet_name = key,\n startrow = row,\n startcol = self.indentation)\n # Increment #\n row += len(df.index) + self.spacing",
"def write_shards(\n voxceleb_folder_path: pathlib.Path,\n shards_path: pathlib.Path,\n compress_in_place: bool,\n shard_name_pattern: str = \"shard-{idx:06d}\",\n samples_per_shard: int = 5000,\n sequential_same_speaker_samples: int = 4,\n min_unique_speakers_per_shard: int = 32,\n ensure_all_data_in_shards: bool = False,\n discard_partial_shards: bool = True,\n):\n # make sure output folder exist\n shards_path.mkdir(parents=True, exist_ok=True)\n\n # find all audio files\n audio_files = sorted([f for f in voxceleb_folder_path.rglob(\"*.wav\")])\n\n # create data dictionary {speaker id: List[file_path, sample_key]}}\n data: Dict[str, List[Tuple[str, str, pathlib.Path]]] = defaultdict(list)\n\n # track statistics on data\n all_speaker_ids = set()\n all_youtube_ids = set()\n all_sample_ids = set()\n youtube_id_per_speaker = defaultdict(list)\n sample_keys_per_speaker = defaultdict(list)\n num_samples = 0\n all_keys = set()\n\n for f in audio_files:\n # path should be\n # ${voxceleb_folder_path}/wav/speaker_id/youtube_id/utterance_id.wav\n speaker_id = f.parent.parent.name\n youtube_id = f.parent.name\n utterance_id = f.stem\n\n # create a unique key for this sample\n key = f\"{speaker_id}{ID_SEPARATOR}{youtube_id}{ID_SEPARATOR}{utterance_id}\"\n\n if key in all_keys:\n raise ValueError(\"found sample with duplicate key\")\n else:\n all_keys.add(key)\n\n # store statistics\n num_samples += 1\n\n all_speaker_ids.add(speaker_id)\n all_youtube_ids.add(youtube_id)\n all_sample_ids.add(key)\n\n youtube_id_per_speaker[speaker_id].append(youtube_id)\n sample_keys_per_speaker[speaker_id].append(key)\n\n # store data in dict\n data[speaker_id].append((key, speaker_id, f))\n\n # randomly shuffle the list of all samples for each speaker\n for speaker_id in data.keys():\n random.shuffle(data[speaker_id])\n\n # determine a specific speaker_id label for each speaker_id\n speaker_id_to_idx = {\n speaker_id: idx for idx, speaker_id in enumerate(sorted(all_speaker_ids))\n }\n\n # write a meta.json file which contains statistics on the data\n # which will be written to shards\n all_speaker_ids = list(all_speaker_ids)\n all_youtube_ids = list(all_youtube_ids)\n all_sample_ids = list(all_sample_ids)\n\n meta_dict = {\n \"speaker_ids\": all_speaker_ids,\n \"youtube_ids\": all_youtube_ids,\n \"sample_ids\": all_sample_ids,\n \"speaker_id_to_idx\": speaker_id_to_idx,\n \"youtube_ids_per_speaker\": youtube_id_per_speaker,\n \"sample_ids_per_speaker\": sample_keys_per_speaker,\n \"num_samples\": num_samples,\n \"num_speakers\": len(all_speaker_ids),\n }\n\n with (shards_path / \"meta.json\").open(\"w\") as f:\n json.dump(meta_dict, f)\n\n # split the data into shards such that each shard has at most\n # `samples_per_shard` samples and that the sequential order in the\n # shard is:\n # 1 = sample of speaker id `i`\n # ...\n # sequential_same_speaker_samples =sample of speaker id `i`\n # sequential_same_speaker_samples + 1 = sample of speaker id `j`\n # etc\n shards_list = []\n\n def samples_left():\n num_samples_left = sum(len(v) for v in data.values())\n num_valid_speakers = sum(\n len(v) >= sequential_same_speaker_samples for v in data.values()\n )\n\n # a shard should contain at least 2 different speakers\n if num_valid_speakers >= 2 or ensure_all_data_in_shards:\n return num_samples_left\n else:\n return 0\n\n def valid_speakers(n: int, previous_id: Optional[str] = None):\n return [k for k in data.keys() if len(data[k]) >= n and k != previous_id]\n\n def pop_n_samples(\n n: int, current_speakers_in_shard: 
Set[str], previous_id: Optional[str] = None\n ):\n valid_speaker_ids = valid_speakers(n, previous_id)\n\n if len(current_speakers_in_shard) < min_unique_speakers_per_shard:\n valid_speaker_ids = [\n sid for sid in valid_speaker_ids if sid not in current_speakers_in_shard\n ]\n\n if len(valid_speaker_ids) == 0:\n raise ValueError(\n f\"shard cannot be guaranteed to have {min_unique_speakers_per_shard=}\"\n )\n\n samples_per_speaker = [len(data[k]) for k in valid_speaker_ids]\n random_speaker_id = random.choices(valid_speaker_ids, samples_per_speaker)[0]\n current_speakers_in_shard.add(random_speaker_id)\n popped_samples = []\n\n for _ in range(n):\n sample_list = data[random_speaker_id]\n popped_samples.append(\n sample_list.pop(random.randint(0, len(sample_list) - 1))\n )\n\n return popped_samples, random_speaker_id, current_speakers_in_shard\n\n # write shards\n while samples_left() > 0:\n shard = []\n speakers_in_shard = set()\n previous = None\n\n print(\n f\"determined shards={len(shards_list):>4}\\t\"\n f\"samples left={samples_left():>9,d}\\t\"\n f\"speakers left=\"\n f\"{len(valid_speakers(sequential_same_speaker_samples, previous)):>4,d}\"\n )\n while len(shard) < samples_per_shard and samples_left() > 0:\n samples, previous, speakers_in_shard = pop_n_samples(\n n=sequential_same_speaker_samples,\n current_speakers_in_shard=speakers_in_shard,\n previous_id=previous,\n )\n for key, speaker_id, f in samples:\n shard.append((key, speaker_id_to_idx[speaker_id], f))\n\n shards_list.append(shard)\n\n # assert all data is in a shard\n if ensure_all_data_in_shards:\n assert sum(len(v) for v in data.values()) == 0\n\n # remove any shard which does share the majority amount of samples\n if discard_partial_shards:\n unique_len_count = defaultdict(int)\n for lst in shards_list:\n unique_len_count[len(lst)] += 1\n\n if len(unique_len_count) > 2:\n raise ValueError(\"expected at most 2 unique lengths\")\n\n if len(unique_len_count) == 0:\n raise ValueError(\"expected at least 1 unique length\")\n\n majority_len = -1\n majority_count = -1\n for unique_len, count in unique_len_count.items():\n if count > majority_count:\n majority_len = unique_len\n majority_count = count\n\n shards_list = [lst for lst in shards_list if len(lst) == majority_len]\n\n # write shards\n shards_path.mkdir(exist_ok=True, parents=True)\n\n # seems like disk write speed only allows for 1 process anyway :/\n with multiprocessing.Pool(processes=1) as p:\n for idx, shard_content in enumerate(shards_list):\n args = {\n \"shard_name\": shard_name_pattern.format(idx=idx),\n \"shards_path\": shards_path,\n \"data_tpl\": shard_content,\n \"compress\": compress_in_place,\n }\n p.apply_async(\n _write_shard,\n kwds=args,\n error_callback=lambda x: print(\n f\"error in apply_async ``_write_shard!\\n{x}\"\n ),\n )\n\n p.close()\n p.join()",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response"
]
| [
"0.6590058",
"0.6386988",
"0.59298795",
"0.58624107",
"0.5822092",
"0.58134943",
"0.5705406",
"0.56998646",
"0.56144476",
"0.549566",
"0.54687804",
"0.5456929",
"0.5437227",
"0.54209745",
"0.5414605",
"0.5351281",
"0.5350195",
"0.5331751",
"0.52800256",
"0.52799267",
"0.5273853",
"0.52720463",
"0.52185833",
"0.5211999",
"0.51966435",
"0.51933175",
"0.5183122",
"0.51654464",
"0.51532584",
"0.51501364"
]
| 0.7419353 | 0 |
Export (updated) illumina samplesheet. | def export_illumina(args):
clarity_epp.export.illumina.update_samplesheet(lims, args.process_id, args.artifact_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generate_sample_sheet(self):\n pass",
"def make_custom_sample_sheet(input_sample_sheet,output_sample_sheet=None,\n lanes=None,fmt=None):\n # Load the sample sheet data\n sample_sheet = IlluminaData.SampleSheet(input_sample_sheet)\n # Determine the column names for this format\n if sample_sheet.format == 'CASAVA':\n sample_col = 'SampleID'\n project_col = 'SampleProject'\n elif sample_sheet.format == 'IEM':\n sample_col = 'Sample_ID'\n project_col = 'Sample_Project'\n else:\n raise Exception(\"Unknown sample sheet format: %s\" %\n sample_sheet.format)\n # Add project names if not supplied\n for line in sample_sheet:\n if not line[project_col]:\n line[project_col] = line[sample_col]\n # Fix other problems\n sample_sheet.fix_illegal_names()\n sample_sheet.fix_duplicated_names()\n # Select subset of lanes if requested\n if lanes is not None:\n logging.debug(\"Updating to include only specified lanes: %s\" %\n ','.join([str(l) for l in lanes]))\n i = 0\n while i < len(sample_sheet):\n line = sample_sheet[i]\n if line['Lane'] in lanes:\n logging.debug(\"Keeping %s\" % line)\n i += 1\n else:\n del(sample_sheet[i])\n # Write out new sample sheet\n if output_sample_sheet is not None:\n sample_sheet.write(output_sample_sheet,fmt=fmt)\n return sample_sheet",
"def export_tapestation(args):\n clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)",
"def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)",
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def export_bioanalyzer(args):\n clarity_epp.export.bioanalyzer.samplesheet(lims, args.process_id, args.output_file)",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def test_parse_sample_sheet(self):\n pass",
"def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n 
ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)",
"def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)",
"def convert_sheet(filename, output):\n r2dt.write_converted_sheet(filename, output)",
"def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, 
overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we 
know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, 
host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()",
"def generate_ref_sheet(output_path):\r\n #header of reference sheet\r\n ref_out = Path(output_path)\r\n reference_elements=[]\r\n title = Paragraph('Validation Reference Sheet', style=styles['title'])\r\n reference_elements.append(title)\r\n\r\n #creating table with properties of metric scores\r\n metric_ref = (['Metrics Reference Table','','',''],\r\n ['Name', 'Attribute','Range','Perfect Score'],\r\n ['Percent Correct','Accuracy','0 to 1', '1'],\r\n ['Bias','Bias','0 to infinity','1'],\r\n ['Hit Rate','Discrimination','0 to 1','1'],\r\n ['False Alarm Rate','Discrimination','0 to 1', '0'],\r\n ['Frequency of Misses','Discrimination','0 to 1','0'],\r\n ['Probability of Correct Negatives','Discrimination',\r\n '0 to 1','1'],\r\n ['Frequency of Hits','Reliability and Resolution',\r\n '0 to 1','1'],\r\n ['False Alarm Ratio','Reliability and Resolution',\r\n '0 to 1','1'],\r\n ['Detection Failure Ratio','Reliability and Resolution',\r\n '0 to 1','0'],\r\n ['Frequency of Correct Negatives',\r\n 'Reliability and Resolution','0 to 1','1'],\r\n ['Threat Score','Accuracy','0 to 1','1'],\r\n ['Odds Ratio','Accuracy','0 to infinity','infinity'],\r\n ['Skill Scores','','',''],\r\n ['True Skill Score','','-1 to 1','1'],\r\n ['Heidke Skill Score','','-1 to 1','1'],\r\n ['Odds Ratio Skill Score','','-1 to 1','1'],\r\n ['Relative Operating Characteristic Skill Score (RSS)','',\r\n '0 to 1','1'],\r\n ['Mean Percentage Error','','-infinity to infinity','0'],\r\n ['Mean Absolute Percentage Error','','0 to infinity','0'])\r\n\r\n metric_ref_table=Table(metric_ref)\r\n metric_ref_table.setStyle(TableStyle([('BACKGROUND',(0,2),(3,13),\r\n colors.lightgrey),\r\n ('BACKGROUND',(0,15),(3,20),\r\n colors.lightgrey),\r\n ('GRID',(0,1),(3,20),0.25,\r\n colors.black),\r\n ('SPAN',(0,0),(3,0)),\r\n ('SPAN',(0,14),(3,14)),\r\n ('SPAN',(0,15),(1,15)),\r\n ('SPAN',(0,16),(1,16)),\r\n ('SPAN',(0,17),(1,17)),\r\n ('SPAN',(0,18),(1,18)),\r\n ('SPAN',(0,19),(1,19)),\r\n ('SPAN',(0,20),(1,20))]))\r\n\r\n #writing report\r\n reference_elements.append(metric_ref_table)\r\n\r\n reference = SimpleDocTemplate(str(ref_out / 'validation_reference.pdf'),\r\n pagesize=letter)\r\n reference.build(reference_elements)",
"def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)",
"def export_wells(self, w, title):\r\n self._check_out(title)\r\n np.savez_compressed(os.path.join(self.out_dir, title, title), w)",
"def render_sheet_to_file(self, file_name, sheet, **keywords):\n raise NotImplementedError(\"We are not writing to file\")",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response",
"def test_parse_samplesheet(self):\n run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n run = MinIONqc(run_dir, None, None)\n run.lims_samplesheet = 'data/nanopore_samplesheets/2020/DELIVERY_SQK-LSK109_AAU644_Samplesheet_24-594126.csv'\n run._parse_samplesheet()\n self.assertTrue(filecmp.cmp(run.nanoseq_sample_sheet, 'data/nanopore_samplesheets/expected/SQK-LSK109_sample_sheet.csv'))\n self.assertTrue(filecmp.cmp(run.anglerfish_sample_sheet, 'data/nanopore_samplesheets/expected/anglerfish_sample_sheet.csv'))",
"def load_youd_bartlett_demo():\n return read_excel(join(dirname(__file__),'YoudHansenBartlett2002_demo.xls'))",
"def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)",
"def export_sample_indications(args):\n clarity_epp.export.sample.sample_indications(\n lims, args.output_file, args.artifact_name, args.sequencing_run, args.sequencing_run_project\n )",
"def export_sample(sample):\n outfile = os.path.join(sample.work_directory, sample.sample_id + '_sample.json')\n # print pretty JSON: print(json.dumps(parser.reads,indent=4, cls=CustomEncoder))\n with open(outfile, 'w') as out:\n # out.write(json.dumps(sample, indent=4, cls=CustomEncoder))\n json.dump(sample, out, cls=CustomEncoder)",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()",
"def main():\n credentials = service_account.Credentials.from_service_account_file(\n CREDENTIALS_FILE,\n scopes=SCOPES,\n subject=SUBJECT)\n service = build('sheets', 'v4', credentials=credentials)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Name, Major:')\n for row in values:\n # Print columns A and E, which correspond to indices 0 and 4.\n print('%s, %s' % (row[0], row[4]))",
"def extract(self):\n \n print('Extracting Metrics data... ',end=''),\n self.df = pd.read_excel(self.file_path, index_col=0)\n print('Done')",
"def render_sheet(self, sheet, **keywords):\n raise NotImplementedError(\"Please render sheet\")"
]
| [
"0.6868604",
"0.6461967",
"0.5999006",
"0.59804803",
"0.586817",
"0.582595",
"0.5779311",
"0.5733163",
"0.57305694",
"0.55586296",
"0.5525661",
"0.5509233",
"0.5470714",
"0.54577184",
"0.5450282",
"0.54479027",
"0.54193574",
"0.5349859",
"0.52579254",
"0.524692",
"0.52374053",
"0.5232408",
"0.5223961",
"0.5215344",
"0.52135825",
"0.52020454",
"0.5201677",
"0.5185087",
"0.514286",
"0.5141338"
]
| 0.7068607 | 0 |
Export removed samples table. | def export_removed_samples(args):
clarity_epp.export.sample.removed_samples(lims, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_cohorts_for_metadata_export(all_samples, blacklist=[]):\n # Prepare for FC export format\n data = all_samples.rename(columns={'cohort_code': 'membership:sample_set_id', 'entity:sample_id': 'sample_id'})\n data_clean = data[['membership:sample_set_id', 'sample_id']]\n\n # Remove blacklist\n data_clean = data_clean[ ~data_clean['sample_id'].isin(blacklist)]\n\n return data_clean",
"def export_data(self, pth):\n self.cleanup_allowed = False\n self.train_df.to_csv(os.path.join(pth, \"train.csv\"))\n self.valid_df.to_csv(os.path.join(pth, \"valid.csv\"))\n self.test_df.to_csv(os.path.join(pth, \"test.csv\"))",
"def drop_table(self):\n for ss in self.spectrae:\n ss.tau[('H',1,1215)] = np.array([0])",
"def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')",
"def remove(table, id_):\n\n entry_index = 0\n for entry in table:\n entry_id_ = entry[0]\n if entry_id_ == id_:\n del table[entry_index]\n entry_index += 1\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n return table",
"def sample_table(self):\n if self[SAMPLE_EDIT_FLAG_KEY]:\n _LOGGER.debug(\"Generating new sample_table DataFrame\")\n self[SAMPLE_EDIT_FLAG_KEY] = False\n new_df = self._get_table_from_samples(index=self.st_index)\n self._sample_table = new_df\n return new_df\n\n _LOGGER.debug(\"Returning stashed sample_table DataFrame\")\n return self._sample_table",
"def delete_selected_rows(self):\n self._export_mode = 'delete'\n self._counter_update_data += 1",
"def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()",
"def _minimal_export_traces(self, outdir=None, analytes=None,\n samples=None, subset=None):\n if analytes is None:\n analytes = self.analytes\n elif isinstance(analytes, str):\n analytes = [analytes]\n\n if samples is not None:\n subset = self.make_subset(samples)\n elif not hasattr(self, 'subsets'):\n self.make_subset()\n\n if subset is None:\n samples = self.subsets['All_Analyses']\n else:\n try:\n samples = self.subsets[subset]\n except:\n raise ValueError((\"Subset '{:s}' does not .\".format(subset) +\n \"exist.\\nRun 'make_subset' to create a\" +\n \"subset.\"))\n\n focus_stage = 'rawdata'\n ud = 'counts'\n\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n for s in samples:\n d = self.data_dict[s].data[focus_stage]\n out = {}\n\n for a in analytes:\n out[a] = d[a]\n\n out = pd.DataFrame(out, index=self.data_dict[s].Time)\n out.index.name = 'Time'\n\n header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %\n (time.strftime('%Y:%m:%d %H:%M:%S')),\n \"# Analysis described in '../analysis.log'\",\n '# Run latools.reproduce to import analysis.',\n '#',\n '# Sample: %s' % (s),\n '# Analysis Time: ' + self.data_dict[s].meta['date']]\n\n header = '\\n'.join(header) + '\\n'\n\n csv = out.to_csv()\n\n with open('%s/%s.csv' % (outdir, s), 'w') as f:\n f.write(header)\n f.write(csv)\n return",
"def remove_data(self, remove_without_confirmation = False):\n if self.verbose > 0:\n print(\"SpectraTools.Hitran.remove_data()\") \n \n if not remove_without_confirmation:\n answer = input(\"Do you want to delete {:}? yes/no [no] \".format(self.tablename))\n print(answer)\n if answer != \"yes\":\n print(\"Removal of data was canceled by the user\")\n return 0\n\n hapi.dropTable(self.tablename)\n \n filepath = self.db_path.joinpath(pathlib.Path(\"{:s}.data\".format(self.tablename)))\n if filepath.is_file():\n os.remove(filepath)\n\n filepath = self.db_path.joinpath(pathlib.Path(\"{:s}.header\".format(self.tablename)))\n if filepath.is_file():\n os.remove(filepath)",
"def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)",
"def samples_to_csv(self, samples):\r\n if conf.instance[\"general\"][\"output\"][\"samples_to_csv\"]:\r\n samples.write_table(filename=self._samples_file)\r\n samples.info_to_json(filename=self._info_file)\r\n if isinstance(samples, SamplesPDF):\r\n try:\r\n samples.save_covariance_matrix(self._covariance_file)\r\n except ValueError as e:\r\n logger.warning(\r\n f\"Could not save covariance matrix because of the following error:\\n{e}\"\r\n )",
"def remove(table, id_):\n\n common.toremoveid(\"inventory/inventory.csv\",data_manager.get_table_from_file(\"inventory/inventory.csv\"),id_)",
"def clear_data_from_table():\n global data_base, table\n sqlite3_simple_clear_table(data_base, table)\n output_on_display.delete(1.0, END)\n output_on_display.insert(END, '')\n return",
"def export_tasks(self, samples, features, export_id):\n samples_for_sharding = samples.randomColumn('shard_split')\n for i in range(self.num_shards):\n range_min = float(i) / float(self.num_shards)\n range_max = float(i + 1) / float(self.num_shards)\n range_filter = ee.Filter.And(\n ee.Filter.gte('shard_split', range_min),\n ee.Filter.lt('shard_split', range_max))\n samples_to_export = samples_for_sharding.filter(range_filter)\n\n task = ee.batch.Export.table.toCloudStorage(\n collection=samples_to_export,\n description=export_id + \"_%i\" % i,\n bucket=self.bucket,\n fileNamePrefix=self.directory + '/' + export_id + \"_%i\" % i,\n fileFormat='TFRecord',\n selectors=features,\n maxWorkers=2000)\n\n # Can be a stopping call if TaskManager if busy.\n self.task_manager.submit(task)",
"def remove(table, id_):\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('accouting/items.csv', table)\n\n return table",
"def test_table_has_no_rows(self):\n models.SourceDataset.objects.all().delete()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertEqual(len(table.rows), 0)",
"def remove(table, id_):\n\n # your code\n\n common.toremoveid(\"store/games.csv\",data_manager.get_table_from_file(\"store/games.csv\"),id_)",
"def export_table(path, path_out):\n table = rb.get_table(path)\n table.to_csv(path_out, index=False)\n return",
"def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table",
"def teardown(self):\n super(TestCisPandasOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))",
"def export_table_to_cloudstorage(fc,description,fileNamePrefix):\n \n task = ee.batch.Export.table.toCloudStorage(\n collection = ee.FeatureCollection(fc),\n description = description,\n bucket = GCS_BUCKET,\n fileNamePrefix = GCS_OUTPUT_PATH + fileNamePrefix,\n fileFormat = \"CSV\"\n )\n task.start()",
"def save(self, labpath: str) -> None:\n self._table.to_csv(labpath, index=False)\n print(\"# Save experimental data into {0}\".format(labpath))",
"def del_table():\n global data_base, table, output_on_display\n try:\n sqlite3_simple_delete_table(data_base, table)\n list_tables = update_list_tables(data_base)\n list_tables.clear()\n output_on_display.delete(1.0, END)\n output_on_display.insert(END, '')\n return\n except sqlite3.OperationalError:\n mistake_del_table()",
"def basic_table_eject():\n tbl: pa.Table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n # NOTE: Requires pandas installation (to_pandas)\n 'to_pandas > to_dict': tbl.to_pandas().to_dict(orient='records'),\n 'to_pydict': tbl.to_pydict(), # Dict[str, list]\n 'to_pylist': tbl.to_pylist(), # List[dict]\n 'to_string': tbl.to_string(), # str\n }\n\n pretty_print_result_map(results)",
"def test_filter_samples_from_otu_table_sample_ids_dense(self):\r\n otu_table = parse_biom_table_str(dense_otu_table1)\r\n\r\n # keep two samples\r\n expected_sample_ids = set(['PC.593', 'PC.607'])\r\n filtered_otu_table = filter_samples_from_otu_table(\r\n otu_table,\r\n expected_sample_ids,\r\n 0,\r\n inf)\r\n self.assertEqual(\r\n set(filtered_otu_table.SampleIds),\r\n expected_sample_ids)\r\n\r\n # keep some other samples\r\n expected_sample_ids = set(['PC.354', 'PC.635', 'PC.593'])\r\n filtered_otu_table = filter_samples_from_otu_table(\r\n otu_table,\r\n expected_sample_ids,\r\n 0,\r\n inf)\r\n self.assertEqual(\r\n set(filtered_otu_table.SampleIds),\r\n expected_sample_ids)",
"def test_table_has_no_rows(self):\n models.SourceDataset.objects.all().delete()\n response = self.client.get(self.get_url())\n context = response.context\n table = context['source_dataset_table']\n self.assertEqual(len(table.rows), 0)",
"def remove(table, id_):\n\n # your code\n\n key = common.check_for_key(id_,table)\n\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n table.pop(key)\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n #print(table)\n return table",
"def Dump():\n with open(path.join(MAIN_PATH, INST), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in instances:\n writer.writerow(inst)\n \n with open(path.join(MAIN_PATH, \"test_instances.csv\"), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in test_instances:\n writer.writerow(inst)"
]
| [
"0.5847827",
"0.5772176",
"0.57479125",
"0.5740398",
"0.5681225",
"0.5638062",
"0.5614595",
"0.5612168",
"0.5602861",
"0.5550865",
"0.5547393",
"0.5537967",
"0.55224085",
"0.5499635",
"0.54904026",
"0.5487787",
"0.54560983",
"0.5452331",
"0.5449576",
"0.5392043",
"0.5377981",
"0.53717023",
"0.5358973",
"0.53566563",
"0.53546727",
"0.5353199",
"0.53225446",
"0.5309045",
"0.53046095",
"0.5285475"
]
| 0.6860491 | 0 |
Export related MIP samples | def export_sample_related_mip(args):
clarity_epp.export.sample.sample_related_mip(lims, args.process_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_removed_samples(args):\n clarity_epp.export.sample.removed_samples(lims, args.output_file)",
"def export_sample_indications(args):\n clarity_epp.export.sample.sample_indications(\n lims, args.output_file, args.artifact_name, args.sequencing_run, args.sequencing_run_project\n )",
"def samples(self):\n pass",
"def save_samples(samples, output_prefix=\"sample\"):\n\n for (i, vertices) in enumerate(samples):\n vertex_fname = \"{pref}{i}_vertices.ply\".format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == \"\":\n vertex_fname = \"./\" + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)",
"def samples(self):\n return glob.glob(os.path.join(self.production.rundir, \"extrinsic_posterior_samples.dat\"))",
"def export_bioanalyzer(args):\n clarity_epp.export.bioanalyzer.samplesheet(lims, args.process_id, args.output_file)",
"def export_sample(sample):\n outfile = os.path.join(sample.work_directory, sample.sample_id + '_sample.json')\n # print pretty JSON: print(json.dumps(parser.reads,indent=4, cls=CustomEncoder))\n with open(outfile, 'w') as out:\n # out.write(json.dumps(sample, indent=4, cls=CustomEncoder))\n json.dump(sample, out, cls=CustomEncoder)",
"def export_tapestation(args):\n clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)",
"def samples(self, gp):\r\n raise NotImplementedError",
"def convert_testing_data(mfccPath):\n inputlist, inputnamelist = ark_parser(mfccPath, 'test.ark')\n\n print(\"%d sample in testing set\" % len(inputlist))\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n \n with open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamelist, test_name)",
"def export_dataset(self):\n raise NotImplementedError",
"def prepare_batch_sample_set_for_metadata_export(path, tsca_id):\n raw = pd.read_table(path)\n print( \"%d Samples in this batch\" % raw.shape[0] )\n\n # Create dfs to upload\n all_samples = pd.concat([pd.DataFrame(index=raw.index, columns=['membership:sample_set_id'], data=tsca_id), \\\n raw[ ['sample_id', 'sample_type'] ]], axis=1)\n\n\n tumors = all_samples.loc[ all_samples['sample_type'] == \"Tumor\", ['membership:sample_set_id', 'sample_id'] ]\n tumors.loc[: , 'membership:sample_set_id'] = \"%s_T\"%tsca_id\n \n normals = all_samples.loc[ all_samples['sample_type'] == \"Normal\", ['membership:sample_set_id', 'sample_id'] ]\n normals.loc[: , 'membership:sample_set_id'] = \"%s_N\"%tsca_id\n\n all_samples = all_samples.drop('sample_type', axis=1)\n return (all_samples, tumors, normals)",
"def create_materials(endpoint):\n for phenotype in get_phenotypes(endpoint):\n print(phenotype)\n # for now, creating the sample name combining studyDbId and potDbId -\n # eventually this should be observationUnitDbId\n sample_name = phenotype['studyDbId']+\"_\"+phenotype['plotNumber']\n this_sample = Sample(name=sample_name)\n that_source = Source(phenotype['germplasmName'], phenotype['germplasmDbId'])\n this_sample.derives_from = that_source",
"def tgt_samples(self, params):\r\n def save_json(save_path, file_id, samples):\r\n init_logger()\r\n for i, sample in enumerate(samples):\r\n save_ = os.path.join(save_path, \"{:s}_{:d}.json\".format(file_id, i))\r\n with open(save_, 'w') as file:\r\n json.dump(sample, file)\r\n logger.info(\"{:s} saved at {:s}\".format(save_, save_path))\r\n\r\n\r\n json_file, save_path = params\r\n init_logger()\r\n _, tgt = self.load_json(json_file)\r\n\r\n file_id = json_file.split(\"/\")[-1].split(\".\")[0]\r\n if len(tgt) >= self.args.min_sents_num and len(tgt) <= self.args.max_sents_num:\r\n tgt_ = list(tgt)\r\n random.seed(66)\r\n random.shuffle(tgt_)\r\n\r\n # make sentence pair and write in a single file\r\n positive_sents = tgt\r\n positive_pairs = [(positive_sents[i], positive_sents[i+1]) for i in range(len(positive_sents)-1)]\r\n\r\n negative_sents = tgt_\r\n negative_pairs = [(negative_sents[i], negative_sents[i+1]) for i in range(len(negative_sents)-1)]\r\n\r\n positive_samples = [{\"tgt\": pair, \"coherence\": 0} for pair in positive_pairs] # 0 represents coherent\r\n negative_samples = [{\"tgt\": pair, \"coherence\": 1} for pair in negative_pairs] # 1 represents incoherent\r\n\r\n save_json(save_path, file_id, positive_samples)\r\n save_json(save_path, file_id+\"_r\", negative_samples)",
"def export_sampleStorage_csv(self, sample_ids_I, filename_O):\n\n data_O = [];\n for sample_id in sample_ids_I:\n data_tmp =[];\n data_tmp = self.get_rows_sampleID_limsSampleStorage(sample_id);\n data_O.extend(data_tmp);\n if data_O:\n io = base_exportData(data_O);\n io.write_dict2csv(filename_O);",
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def write_inputs(sampling_data):\n datanames = sampling_data.dtype.names\n tarputs = tarfile.open('smpl_mcnp_depl_inps.tar', 'w')\n for num, sample in enumerate(sampling_data):\n input = HomogeneousInput(sample['core_r'],\n sample['core_r']*sample['AR'],\n sample['power'])\n homog_comp = input.homog_core(sample['enrich'],\n sample['cool_r'],\n sample['PD'])\n input.write_mat_string(homog_comp)\n \n # identifying header string for post-processing\n header_str = ''\n for param in dimensions:\n header_str += str(round(sample[param], 5)) + ','\n # write the input and tar it\n filename = input.write_input(num, header_str)\n tarputs.add(filename)\n\n # write HTC input list\n htc_inputs = open('input_list.txt', 'w')\n htc_inputs.write('\\n'.join(glob.glob(\"*.i\")))\n htc_inputs.close()\n \n tarputs.add('input_list.txt')\n tarputs.close()",
"def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()",
"def _minimal_export_traces(self, outdir=None, analytes=None,\n samples=None, subset=None):\n if analytes is None:\n analytes = self.analytes\n elif isinstance(analytes, str):\n analytes = [analytes]\n\n if samples is not None:\n subset = self.make_subset(samples)\n elif not hasattr(self, 'subsets'):\n self.make_subset()\n\n if subset is None:\n samples = self.subsets['All_Analyses']\n else:\n try:\n samples = self.subsets[subset]\n except:\n raise ValueError((\"Subset '{:s}' does not .\".format(subset) +\n \"exist.\\nRun 'make_subset' to create a\" +\n \"subset.\"))\n\n focus_stage = 'rawdata'\n ud = 'counts'\n\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n for s in samples:\n d = self.data_dict[s].data[focus_stage]\n out = {}\n\n for a in analytes:\n out[a] = d[a]\n\n out = pd.DataFrame(out, index=self.data_dict[s].Time)\n out.index.name = 'Time'\n\n header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %\n (time.strftime('%Y:%m:%d %H:%M:%S')),\n \"# Analysis described in '../analysis.log'\",\n '# Run latools.reproduce to import analysis.',\n '#',\n '# Sample: %s' % (s),\n '# Analysis Time: ' + self.data_dict[s].meta['date']]\n\n header = '\\n'.join(header) + '\\n'\n\n csv = out.to_csv()\n\n with open('%s/%s.csv' % (outdir, s), 'w') as f:\n f.write(header)\n f.write(csv)\n return",
"def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)",
"def gen_sample_report():\n sample_report().save()",
"def test_intent_classifier_get_testing_samples(self):\n pass",
"def create_samples(self):\n for s_id in range(len(self.data[\"sample\"])):\n self.samples.add(Sample(s_id, [self.data[key][s_id] for key in self.data.keys() if key not in WRONG_KEYS],\n self.data[\"label\"][s_id]))",
"def sample(self):",
"def get_samples(self) -> McmcPtResult:",
"def samples(self, gp, Y_metadata=None, samples=1):\n raise NotImplementedError(\"\"\"May be possible to use MCMC with user-tuning, see\n MCMC_pdf_samples in likelihood.py and write samples function\n using this, beware this is a simple implementation\n of Metropolis and will not work well for all likelihoods\"\"\")",
"def qc_sample_mip(args):\n clarity_epp.qc.sample.set_mip_data_ready(lims, args.process_id)",
"def serialize_samples(self, writer:h5py.File, data_file:str, label_file:str):\n \n raise NotImplementedError('Method not implemented!')",
"def export_embeddings(self):\n save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))",
"def generate_samples(self, n_samples):"
]
| [
"0.6802774",
"0.6612924",
"0.6183057",
"0.61627895",
"0.59162635",
"0.58976537",
"0.5858253",
"0.5856455",
"0.57867974",
"0.577075",
"0.5770604",
"0.57578796",
"0.57270014",
"0.5626266",
"0.5575579",
"0.5557229",
"0.5542678",
"0.549388",
"0.5487541",
"0.54692966",
"0.546072",
"0.54426897",
"0.5431257",
"0.5406738",
"0.5405604",
"0.5379773",
"0.5370652",
"0.5364142",
"0.53505886",
"0.5345155"
]
| 0.7658022 | 0 |
Export samplesheets for Tapestation machine. | def export_tapestation(args):
clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def test_generate_sample_sheet(self):\n pass",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def write_clinical_samples_tsv(sheets):\n\n # Header lines, first item must start with #\n # attribute Display Names\n NAMES = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Descriptions\n DESC = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Datatype\n DATATYPE = [\"#STRING\", \"STRING\"]\n # attribute Priority\n PRIORITY = [\"#1\", \"1\"]\n # attribute columns\n COLUMNS = [\"PATIENT_ID\", \"SAMPLE_ID\"]\n\n with open(snakemake.output.samples_tsv, \"w\") as tsvfile:\n writer = csv.writer(tsvfile, delimiter=\"\\t\")\n # write header\n writer.writerow(NAMES)\n writer.writerow(DESC)\n writer.writerow(DATATYPE)\n writer.writerow(PRIORITY)\n writer.writerow(COLUMNS)\n\n for sheet in sheets:\n for p in sheet.bio_entities.values():\n for s in p.bio_samples.values():\n if s.extra_infos[\"isTumor\"]:\n writer.writerow([p.name, s.name])",
"def main():\n\n gephyrin_df = gephyrin_pairwise()\n cav31_df = cav31_pairwise()\n synapsin_df = synapsin_pairwise()\n psd_df = psd95_pairwise()\n vglut1_df = vglut1_pairwise()\n\n\n sheet_name = 'Pairwise'\n fn = 'pairwise_comparisons.xlsx'\n df_list = [synapsin_df, vglut1_df, psd_df, gephyrin_df, cav31_df]\n aa.write_dfs_to_excel(df_list, sheet_name, fn)",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def test_export_stp(self):\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")\n\n self.test_shape.export_stp(\"test_solid.stp\", mode=\"solid\")\n self.test_shape.export_stp(\"test_solid2.stp\")\n self.test_shape.export_stp(\"test_wire.stp\", mode=\"wire\")\n\n assert Path(\"test_solid.stp\").exists() is True\n assert Path(\"test_solid2.stp\").exists() is True\n assert Path(\"test_wire.stp\").exists() is True\n\n assert Path(\"test_solid.stp\").stat().st_size == Path(\"test_solid2.stp\").stat().st_size\n assert Path(\"test_wire.stp\").stat().st_size < Path(\"test_solid2.stp\").stat().st_size\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")",
"def test_export_stp(self):\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")\n\n self.test_shape.export_stp(\"test_solid.stp\", mode=\"solid\")\n self.test_shape.export_stp(\"test_solid2.stp\")\n self.test_shape.export_stp(\"test_wire.stp\", mode=\"wire\")\n\n assert Path(\"test_solid.stp\").exists() is True\n assert Path(\"test_solid2.stp\").exists() is True\n assert Path(\"test_wire.stp\").exists() is True\n\n assert Path(\"test_solid.stp\").stat().st_size == Path(\"test_solid2.stp\").stat().st_size\n assert Path(\"test_wire.stp\").stat().st_size < Path(\"test_solid2.stp\").stat().st_size\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")",
"def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)",
"def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)",
"def export_classification(out_name, table, asset_root, region, years, export='asset'):\n fc = ee.FeatureCollection(table)\n roi = ee.FeatureCollection(region)\n mask = roi.geometry().bounds().getInfo()['coordinates']\n\n classifier = ee.Classifier.randomForest(\n numberOfTrees=100,\n variablesPerSplit=0,\n minLeafPopulation=1,\n outOfBagMode=False).setOutputMode('CLASSIFICATION')\n\n input_props = fc.first().propertyNames().remove('YEAR').remove('POINT_TYPE').remove('system:index')\n\n trained_model = classifier.train(fc, 'POINT_TYPE', input_props)\n\n for yr in years:\n input_bands = stack_bands(yr, roi)\n annual_stack = input_bands.select(input_props)\n classified_img = annual_stack.classify(trained_model).int().set({\n 'system:index': ee.Date('{}-01-01'.format(yr)).format('YYYYMMdd'),\n 'system:time_start': ee.Date('{}-01-01'.format(yr)).millis(),\n 'system:time_end': ee.Date('{}-12-31'.format(yr)).millis(),\n 'image_name': out_name,\n 'class_key': '0: irrigated, 1: rainfed, 2: uncultivated, 3: wetland'})\n\n if export == 'asset':\n task = ee.batch.Export.image.toAsset(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n assetId=os.path.join(asset_root, '{}_{}'.format(out_name, yr)),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n\n elif export == 'cloud':\n task = ee.batch.Export.image.toCloudStorage(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(yr, out_name),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n else:\n raise NotImplementedError('choose asset or cloud for export')\n\n task.start()\n print(os.path.join(asset_root, '{}_{}'.format(out_name, yr)))",
"def get_sr_series(tables, out_name, max_sample=500):\n\n pt_ct = 0\n for year in YEARS:\n for state in TARGET_STATES:\n\n name_prefix = '{}_{}_{}'.format(out_name, state, year)\n local_file = os.path.join('/home/dgketchum/IrrigationGIS/EE_extracts/to_concatenate',\n '{}.csv'.format(name_prefix))\n if os.path.isfile(local_file):\n continue\n else:\n print(local_file)\n\n roi = ee.FeatureCollection(os.path.join(BOUNDARIES, state))\n\n start = '{}-01-01'.format(year)\n d = datetime.strptime(start, '%Y-%m-%d')\n epoch = datetime.utcfromtimestamp(0)\n start_millisec = str(int((d - epoch).total_seconds() * 1000))\n\n table = ee.FeatureCollection(tables)\n table = table.filter(ee.Filter.eq('YEAR', start_millisec))\n table = table.filterBounds(roi)\n table = table.randomColumn('rnd')\n points = table.size().getInfo()\n print('{} {} {} points'.format(state, year, points))\n\n n_splits = int(ceil(points / float(max_sample)))\n ranges = linspace(0, 1, n_splits + 1)\n diff = ranges[1] - ranges[0]\n\n for enum, slice in enumerate(ranges[:-1], start=1):\n slice_table = table.filter(ee.Filter.And(ee.Filter.gte('rnd', slice),\n ee.Filter.lt('rnd', slice + diff)))\n points = slice_table.size().getInfo()\n print('{} {} {} points'.format(state, year, points))\n\n name_prefix = '{}_{}_{}_{}'.format(out_name, state, enum, year)\n local_file = os.path.join('/home/dgketchum/IrrigationGIS/EE_extracts/to_concatenate',\n '{}.csv'.format(name_prefix))\n if os.path.isfile(local_file):\n continue\n else:\n print(local_file)\n\n pt_ct += points\n if points == 0:\n continue\n\n ls_sr_masked = daily_landsat(year, roi)\n stats = ls_sr_masked.sampleRegions(collection=table,\n properties=['POINT_TYPE', 'YEAR', 'LAT_GCS', 'Lon_GCS'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n stats,\n description=name_prefix,\n bucket='wudr',\n fileNamePrefix=name_prefix,\n fileFormat='CSV')\n\n task.start()\n print('{} total points'.format(pt_ct))",
"def export_tasks(self, samples, features, export_id):\n samples_for_sharding = samples.randomColumn('shard_split')\n for i in range(self.num_shards):\n range_min = float(i) / float(self.num_shards)\n range_max = float(i + 1) / float(self.num_shards)\n range_filter = ee.Filter.And(\n ee.Filter.gte('shard_split', range_min),\n ee.Filter.lt('shard_split', range_max))\n samples_to_export = samples_for_sharding.filter(range_filter)\n\n task = ee.batch.Export.table.toCloudStorage(\n collection=samples_to_export,\n description=export_id + \"_%i\" % i,\n bucket=self.bucket,\n fileNamePrefix=self.directory + '/' + export_id + \"_%i\" % i,\n fileFormat='TFRecord',\n selectors=features,\n maxWorkers=2000)\n\n # Can be a stopping call if TaskManager if busy.\n self.task_manager.submit(task)",
"def main():\n\n store = file.Storage('token.json')\n creds = store.get() # set to None, for re-authentication\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n cards = []\n i = 0\n with open(\"The landing cards.txt\") as f:\n for l, llint in enumerate(f):\n for line in llint.split('\\n\\n'):\n if line == '':\n continue\n if '________________' in line:\n if i % 2 == 0:\n cards.append([''])\n else:\n cards[-1].append('')\n i += 1\n cards[-1][-1] += line.replace('________________', '')\n\n\n\n write('Templates!A1:B100', cards, service)",
"def export_sample(sample):\n outfile = os.path.join(sample.work_directory, sample.sample_id + '_sample.json')\n # print pretty JSON: print(json.dumps(parser.reads,indent=4, cls=CustomEncoder))\n with open(outfile, 'w') as out:\n # out.write(json.dumps(sample, indent=4, cls=CustomEncoder))\n json.dump(sample, out, cls=CustomEncoder)",
"def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)",
"def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile, dialect='excel-tab')\n else:\n raise ExportAborted('Export cancelled by user.')",
"def test_export():\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.export(\"test_export.py\")\n assert False # Should be unreachable\n except ValueError:\n pass",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()",
"def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, 
overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we 
know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, 
host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()",
"def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)",
"def Dump():\n with open(path.join(MAIN_PATH, INST), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in instances:\n writer.writerow(inst)\n \n with open(path.join(MAIN_PATH, \"test_instances.csv\"), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in test_instances:\n writer.writerow(inst)",
"def convert_testing_data(mfccPath):\n inputlist, inputnamelist = ark_parser(mfccPath, 'test.ark')\n\n print(\"%d sample in testing set\" % len(inputlist))\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n \n with open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamelist, test_name)",
"def export_data(self, pth):\n self.cleanup_allowed = False\n self.train_df.to_csv(os.path.join(pth, \"train.csv\"))\n self.valid_df.to_csv(os.path.join(pth, \"valid.csv\"))\n self.test_df.to_csv(os.path.join(pth, \"test.csv\"))",
"def request_validation_extract(file_prefix='validation'):\n roi = ee.FeatureCollection(GEO_DOMAIN)\n plots = ee.FeatureCollection(None).filterBounds(roi)\n image_list = list_assets('users/dgketchum/IrrMapper/version_2')\n\n for yr in YEARS:\n yr_img = [x for x in image_list if x.endswith(str(yr))]\n coll = ee.ImageCollection(yr_img)\n classified = coll.mosaic().select('classification')\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = classified.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR', 'FID'],\n scale=30)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)",
"def save_and_upload_cohort_all_samples(all_samples, name, namespace, workspace, blacklist=[]):\n df = all_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})\n df['membership:sample_set_id'] = name\n\n # Re-arrange columns\n cols = ['membership:sample_set_id', 'sample_id']\n df = df[cols]\n\n # Blacklist\n df = df[ ~df['sample_id'].isin(blacklist) ]\n df.to_csv('all_samples/fc_upload_%s.txt'%name, index=None, sep=\"\\t\")\n res = upload_entities_from_tsv(namespace, workspace, 'all_samples/fc_upload_%s.txt'%name)\n return res",
"def create_template(path_string) :\r\n today = datetime.now()\r\n today = today.strftime('%y%y%m%d%H%M%S')\r\n # print(today)\r\n temp_path = os.path.join(path_string, today)\r\n # temp_path = today\r\n # Create a workbook and add a worksheet.\r\n workbook = xlsxwriter.Workbook(f'{temp_path}.xlsx')\r\n worksheet0 = workbook.add_worksheet('ATR') # Defaults to Sheet1.\r\n worksheet1 = workbook.add_worksheet('ESS') # Data.\r\n worksheet2 = workbook.add_worksheet('Statistics') # Defaults to Sheet\r\n\r\n # Some data we want to write to the worksheet.\r\n Tests_List = ['Temp', 'SN', 'Output Power @ P1dBCP', 'Output Power Control Range/Resolution, FWD PWR Ind',\r\n 'Output IP3', 'LO Carrier Leakage', 'Sideband Suppression',\r\n 'Frequency Accuracy and Stability', 'A1 - Noise Figure vs. Gain', 'A1 - Gain variability',\r\n 'A1 - Image Suppression vs. Gain', 'Spurious',\r\n 'A2 - Noise Figure vs. Gain', 'A2 - Gain variability', 'A2 - Image Suppression vs. Gain',\r\n 'Average Power Consumption', 'Input Voltage', 'Digital Tests'\r\n ]\r\n\r\n # Start from the first cell. Rows and columns are zero indexed.\r\n row = 0\r\n # col = 0\r\n\r\n # Iterate over the data and write it out row by row.\r\n for index in range(3) :\r\n for i in range(len(Tests_List)) :\r\n worksheet0.write(row, i, Tests_List[i])\r\n worksheet1.write(row, i, Tests_List[i])\r\n worksheet2.write(row, i, Tests_List[i])\r\n # col += 1\r\n\r\n workbook.close()\r\n\r\n return today, temp_path",
"def write_clinical_patient_tsv(sheets):\n\n # Header lines, first item must start with #\n # attribute Display Names\n NAMES = [\"#Patient Identifier\", \"Dummy value\"]\n # attribute Descriptions\n DESC = [\"#Patient Identifier\", \"Dummy value\"]\n # attribute Datatype\n DATATYPE = [\"#STRING\", \"STRING\"]\n # attribute Priority\n PRIORITY = [\"#1\", \"1\"]\n # attribute columns\n COLUMNS = [\"PATIENT_ID\", \"DUMMY\"]\n\n patients = []\n for sheet in sheets:\n patients += [bio_entity.name for bio_entity in sheet.bio_entities.values()]\n\n with open(snakemake.output.patients_tsv, \"w\") as tsvfile:\n writer = csv.writer(tsvfile, delimiter=\"\\t\")\n # write header\n writer.writerow(NAMES)\n writer.writerow(DESC)\n writer.writerow(DATATYPE)\n writer.writerow(PRIORITY)\n writer.writerow(COLUMNS)\n\n for p in patients:\n writer.writerow([p, \"UNKNOWN\"])",
"def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True"
]
| [
"0.63185227",
"0.62939477",
"0.5897288",
"0.57337177",
"0.5581582",
"0.55671644",
"0.5473567",
"0.5473567",
"0.53622097",
"0.534652",
"0.52113193",
"0.51882476",
"0.5183746",
"0.51815856",
"0.51769376",
"0.51543266",
"0.51492745",
"0.51251966",
"0.51034665",
"0.5092538",
"0.5045036",
"0.50420755",
"0.50405717",
"0.50368375",
"0.49984357",
"0.49890977",
"0.4983227",
"0.4975782",
"0.49511093",
"0.4950966"
]
| 0.7652788 | 0 |
Export samplesheets for tecan machine. | def export_tecan(args):
clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_tapestation(args):\n clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)",
"def test_generate_sample_sheet(self):\n pass",
"def write_clinical_samples_tsv(sheets):\n\n # Header lines, first item must start with #\n # attribute Display Names\n NAMES = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Descriptions\n DESC = [\"#Patient Identifier\", \"Sample Identifier\"]\n # attribute Datatype\n DATATYPE = [\"#STRING\", \"STRING\"]\n # attribute Priority\n PRIORITY = [\"#1\", \"1\"]\n # attribute columns\n COLUMNS = [\"PATIENT_ID\", \"SAMPLE_ID\"]\n\n with open(snakemake.output.samples_tsv, \"w\") as tsvfile:\n writer = csv.writer(tsvfile, delimiter=\"\\t\")\n # write header\n writer.writerow(NAMES)\n writer.writerow(DESC)\n writer.writerow(DATATYPE)\n writer.writerow(PRIORITY)\n writer.writerow(COLUMNS)\n\n for sheet in sheets:\n for p in sheet.bio_entities.values():\n for s in p.bio_samples.values():\n if s.extra_infos[\"isTumor\"]:\n writer.writerow([p.name, s.name])",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def main():\n\n gephyrin_df = gephyrin_pairwise()\n cav31_df = cav31_pairwise()\n synapsin_df = synapsin_pairwise()\n psd_df = psd95_pairwise()\n vglut1_df = vglut1_pairwise()\n\n\n sheet_name = 'Pairwise'\n fn = 'pairwise_comparisons.xlsx'\n df_list = [synapsin_df, vglut1_df, psd_df, gephyrin_df, cav31_df]\n aa.write_dfs_to_excel(df_list, sheet_name, fn)",
"def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)",
"def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()",
"def test_export():\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.export(\"test_export.py\")\n assert False # Should be unreachable\n except ValueError:\n pass",
"def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile, dialect='excel-tab')\n else:\n raise ExportAborted('Export cancelled by user.')",
"def export_classification(out_name, table, asset_root, region, years, export='asset'):\n fc = ee.FeatureCollection(table)\n roi = ee.FeatureCollection(region)\n mask = roi.geometry().bounds().getInfo()['coordinates']\n\n classifier = ee.Classifier.randomForest(\n numberOfTrees=100,\n variablesPerSplit=0,\n minLeafPopulation=1,\n outOfBagMode=False).setOutputMode('CLASSIFICATION')\n\n input_props = fc.first().propertyNames().remove('YEAR').remove('POINT_TYPE').remove('system:index')\n\n trained_model = classifier.train(fc, 'POINT_TYPE', input_props)\n\n for yr in years:\n input_bands = stack_bands(yr, roi)\n annual_stack = input_bands.select(input_props)\n classified_img = annual_stack.classify(trained_model).int().set({\n 'system:index': ee.Date('{}-01-01'.format(yr)).format('YYYYMMdd'),\n 'system:time_start': ee.Date('{}-01-01'.format(yr)).millis(),\n 'system:time_end': ee.Date('{}-12-31'.format(yr)).millis(),\n 'image_name': out_name,\n 'class_key': '0: irrigated, 1: rainfed, 2: uncultivated, 3: wetland'})\n\n if export == 'asset':\n task = ee.batch.Export.image.toAsset(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n assetId=os.path.join(asset_root, '{}_{}'.format(out_name, yr)),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n\n elif export == 'cloud':\n task = ee.batch.Export.image.toCloudStorage(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(yr, out_name),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n else:\n raise NotImplementedError('choose asset or cloud for export')\n\n task.start()\n print(os.path.join(asset_root, '{}_{}'.format(out_name, yr)))",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def get_sr_series(tables, out_name, max_sample=500):\n\n pt_ct = 0\n for year in YEARS:\n for state in TARGET_STATES:\n\n name_prefix = '{}_{}_{}'.format(out_name, state, year)\n local_file = os.path.join('/home/dgketchum/IrrigationGIS/EE_extracts/to_concatenate',\n '{}.csv'.format(name_prefix))\n if os.path.isfile(local_file):\n continue\n else:\n print(local_file)\n\n roi = ee.FeatureCollection(os.path.join(BOUNDARIES, state))\n\n start = '{}-01-01'.format(year)\n d = datetime.strptime(start, '%Y-%m-%d')\n epoch = datetime.utcfromtimestamp(0)\n start_millisec = str(int((d - epoch).total_seconds() * 1000))\n\n table = ee.FeatureCollection(tables)\n table = table.filter(ee.Filter.eq('YEAR', start_millisec))\n table = table.filterBounds(roi)\n table = table.randomColumn('rnd')\n points = table.size().getInfo()\n print('{} {} {} points'.format(state, year, points))\n\n n_splits = int(ceil(points / float(max_sample)))\n ranges = linspace(0, 1, n_splits + 1)\n diff = ranges[1] - ranges[0]\n\n for enum, slice in enumerate(ranges[:-1], start=1):\n slice_table = table.filter(ee.Filter.And(ee.Filter.gte('rnd', slice),\n ee.Filter.lt('rnd', slice + diff)))\n points = slice_table.size().getInfo()\n print('{} {} {} points'.format(state, year, points))\n\n name_prefix = '{}_{}_{}_{}'.format(out_name, state, enum, year)\n local_file = os.path.join('/home/dgketchum/IrrigationGIS/EE_extracts/to_concatenate',\n '{}.csv'.format(name_prefix))\n if os.path.isfile(local_file):\n continue\n else:\n print(local_file)\n\n pt_ct += points\n if points == 0:\n continue\n\n ls_sr_masked = daily_landsat(year, roi)\n stats = ls_sr_masked.sampleRegions(collection=table,\n properties=['POINT_TYPE', 'YEAR', 'LAT_GCS', 'Lon_GCS'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n stats,\n description=name_prefix,\n bucket='wudr',\n fileNamePrefix=name_prefix,\n fileFormat='CSV')\n\n task.start()\n print('{} total points'.format(pt_ct))",
"def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()",
"def export_tasks(self, samples, features, export_id):\n samples_for_sharding = samples.randomColumn('shard_split')\n for i in range(self.num_shards):\n range_min = float(i) / float(self.num_shards)\n range_max = float(i + 1) / float(self.num_shards)\n range_filter = ee.Filter.And(\n ee.Filter.gte('shard_split', range_min),\n ee.Filter.lt('shard_split', range_max))\n samples_to_export = samples_for_sharding.filter(range_filter)\n\n task = ee.batch.Export.table.toCloudStorage(\n collection=samples_to_export,\n description=export_id + \"_%i\" % i,\n bucket=self.bucket,\n fileNamePrefix=self.directory + '/' + export_id + \"_%i\" % i,\n fileFormat='TFRecord',\n selectors=features,\n maxWorkers=2000)\n\n # Can be a stopping call if TaskManager if busy.\n self.task_manager.submit(task)",
"def test_export(filename, folder, space_type):\n grid = bempp.api.shapes.cube(h=0.5)\n space = bempp.api.function_space(grid, *space_type)\n function = bempp.api.GridFunction(\n space, coefficients=np.random.rand(space.global_dof_count)\n )\n bempp.api.export(os.path.join(folder, filename), grid_function=function)",
"def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)",
"def save_and_upload_cohort_all_tumors(all_samples, name, namespace, workspace, blacklist=[]):\n tumor_samples = all_samples[all_samples.sample_type == \"Tumor\"]\n\n # Prepare column names\n df = tumor_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})\n df['membership:sample_set_id'] = name\n\n # Re-arrange columns\n cols = ['membership:sample_set_id', 'sample_id']\n df = df[cols]\n\n # Blacklist\n df = df[ ~df['sample_id'].isin(blacklist) ]\n df.to_csv('tumor_samples/fc_upload_%s.txt'%name, index=None, sep=\"\\t\")\n res = upload_entities_from_tsv(namespace, workspace, 'tumor_samples/fc_upload_%s.txt'%name)\n return res",
"def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)",
"def main():\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n # Call the Sheets API\n SPREADSHEET_ID = '1whfqnqc3TM8ui4hjLqCQq9ZVN5kMuTQrRodXvFreZxM'\n result = service.spreadsheets().get(spreadsheetId = SPREADSHEET_ID).execute()\n spreadsheetUrl = result['spreadsheetUrl']\n\n exportUrl = re.sub(\"\\/edit$\", '/export', spreadsheetUrl)\n headers = { 'Authorization': 'Bearer ' + creds.access_token }\n params = { 'format': 'csv',\n 'gid': 0 } \n queryParams = urllib.urlencode(params)\n url = exportUrl + '?' + queryParams\n response = requests.get(url, headers = headers)\n with open(sys.argv[1], 'wb') as csvFile:\n csvFile.write(response.content)",
"def export_sample(sample):\n outfile = os.path.join(sample.work_directory, sample.sample_id + '_sample.json')\n # print pretty JSON: print(json.dumps(parser.reads,indent=4, cls=CustomEncoder))\n with open(outfile, 'w') as out:\n # out.write(json.dumps(sample, indent=4, cls=CustomEncoder))\n json.dump(sample, out, cls=CustomEncoder)",
"def main():\n\n store = file.Storage('token.json')\n creds = store.get() # set to None, for re-authentication\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n cards = []\n i = 0\n with open(\"The landing cards.txt\") as f:\n for l, llint in enumerate(f):\n for line in llint.split('\\n\\n'):\n if line == '':\n continue\n if '________________' in line:\n if i % 2 == 0:\n cards.append([''])\n else:\n cards[-1].append('')\n i += 1\n cards[-1][-1] += line.replace('________________', '')\n\n\n\n write('Templates!A1:B100', cards, service)",
"def write_clinical_patient_tsv(sheets):\n\n # Header lines, first item must start with #\n # attribute Display Names\n NAMES = [\"#Patient Identifier\", \"Dummy value\"]\n # attribute Descriptions\n DESC = [\"#Patient Identifier\", \"Dummy value\"]\n # attribute Datatype\n DATATYPE = [\"#STRING\", \"STRING\"]\n # attribute Priority\n PRIORITY = [\"#1\", \"1\"]\n # attribute columns\n COLUMNS = [\"PATIENT_ID\", \"DUMMY\"]\n\n patients = []\n for sheet in sheets:\n patients += [bio_entity.name for bio_entity in sheet.bio_entities.values()]\n\n with open(snakemake.output.patients_tsv, \"w\") as tsvfile:\n writer = csv.writer(tsvfile, delimiter=\"\\t\")\n # write header\n writer.writerow(NAMES)\n writer.writerow(DESC)\n writer.writerow(DATATYPE)\n writer.writerow(PRIORITY)\n writer.writerow(COLUMNS)\n\n for p in patients:\n writer.writerow([p, \"UNKNOWN\"])",
"def generate_figures_and_xls_all_strains(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):\n all_freqs = []\n # concatenate all pos and samples into one dataframe\n dframes = []\n for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)\n mer, calls = region2data[(ref, pos)]\n for c, s in zip(calls, samples): \n df = pd.DataFrame(c, columns=feature_names)\n df[\"Strain\"] = s\n df[\"chr_pos\"] = \"%s:%s\"%(ref, pos)\n dframes.append(df)\n # read all tsv files\n df = pd.concat(dframes).dropna().reset_index()\n chr_pos, strains = df[\"chr_pos\"].unique(), df[\"Strain\"].unique() \n # compare individual methods\n for clf, method in (\n (KMeans(n_clusters=2), \"KMeans\"), \n (KNeighborsClassifier(), \"KNN\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"GMM+eIF\"), \n (GaussianMixture(random_state=0, n_components=2), \"GMM\"), \n (AgglomerativeClustering(n_clusters=2), \"AggClust\"), \n #(OneClassSVM(), \"OCSVM\"), \n (IsolationForest(random_state=0), \"IF\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"eIF\"), \n (RandomForestClassifier(), \"RF\"), \n ):\n fname = method\n for i, cols_start in enumerate(cols_starts, 1):\n results = []\n feat_name = \"_\".join(cols_start)\n fname = \"%s.%s\"%(method, feat_name); print(fname)\n outfn = os.path.join(outdir, \"%s.%s\"%(fname, ext))\n # narrow down the features to only signal intensity & trace\n cols = list(filter(lambda n: n.startswith(cols_start), feature_names))#; print(cols) #, \"DT\"\n # compare all samples to 0%\n s0 = samples[0]\n for s in samples[3:]: \n with np.errstate(under='ignore'):\n if \"+\" in method:\n clf2_name = method.split(\"+\")[-1]\n results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], feat_name, \n OFFSET=0.5, clf2_name=clf2_name, clf2=clf)\n elif method in (\"KNN\", \"RF\"):\n results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, feat_name)\n else:\n results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, feat_name)\n \n # and store mod_freq predicted by various methods\n freqs = pd.DataFrame(results, columns=[\"chr_pos\", \"features\", \"mod_freq wt\", \"mod_freq strain\", \"strain\"])\n freqs[\"diff\"] = freqs.max(axis=1)-freqs.min(axis=1); freqs\n for name, pos in group2pos.items(): #((\"negative\", negatives), (\"pU\", pU_pos), (\"Nm\", Nm_pos)):\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"group\"] = name\n #freqs.to_csv(outfn, sep=\"\\t\"); freqs.head()\n freqs.to_excel(xls, fname, index=False)\n # plot differences between methods\n for group, pos in group2pos.items():\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"modification\"] = group\n #return freqs\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))#, sharey=\"all\")\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#f8786fff\", \"#7aae02ff\", \"#00bfc2ff\", \"#c67afeff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"pU\")], ax=ax1)\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#ed823aff\", \"#1c6ca9ff\", \"#35d1bbff\", \"#c978fdff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"Nm\")], ax=ax2)\n ax1.set_ylabel(\"Per-site stoichiometry\"); ax2.set_ylabel(\"\")\n ax1.get_legend().remove(); ax2.get_legend().remove()#ax1.legend([]); ax2.legend([])\n ax1.set_ylim(0, 1); ax2.set_ylim(0, 1); #ax2.set(aspect=1.7)\n ax1.set_title(\"pU modifications\"); ax2.set_title(\"Nm modifications\")\n 
fig.suptitle(fname)\n fig.savefig(outfn)\n plt.close() # clear axis\n freqs[\"name\"] = fname\n all_freqs.append(freqs)\n return all_freqs",
"def export(self, desc):\n self.training_data.fillup_x()\n self.training_data.fillup_a()\n self.training_data.fillup_y()\n self.training_data.export(desc + \"_train.csv\")\n \n self.testing_data.fillup_x()\n self.testing_data.export(desc + \"_test_X.csv\")\n \n self.testing_data.reset_df()\n self.testing_data.fillup_ys()\n self.testing_data.fillup_azero()\n self.testing_data.export(desc + \"_test_Ys.csv\")",
"def multi_sheet(self):\n # Initialize #\n all_sheets = []\n # Loop #\n for name in self.handle.sheet_names:\n sheet = self.handle.parse(name)\n sheet.insert(0, \"nace\", name)\n all_sheets.append(sheet)\n # Write #\n df = pandas.concat(all_sheets)\n df.to_csv(str(self.dest), **self.kwargs)",
"def test_template_export():\n test_file = get_test_filepath(\"test_notebook.ipynb\")\n stream, info = AvocadoExporter().from_filename(test_file)\n try:\n html_parser().parse(stream)\n except ParseError as err:\n if str(err) != \"Unexpected start tag token (script) in the after body phase.\":\n raise err",
"def save_and_upload_cohort_all_samples(all_samples, name, namespace, workspace, blacklist=[]):\n df = all_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})\n df['membership:sample_set_id'] = name\n\n # Re-arrange columns\n cols = ['membership:sample_set_id', 'sample_id']\n df = df[cols]\n\n # Blacklist\n df = df[ ~df['sample_id'].isin(blacklist) ]\n df.to_csv('all_samples/fc_upload_%s.txt'%name, index=None, sep=\"\\t\")\n res = upload_entities_from_tsv(namespace, workspace, 'all_samples/fc_upload_%s.txt'%name)\n return res",
"def dfs_tabs(df_list, sheet_list, file_name):\n\n writer = pd.ExcelWriter(file_name,engine='xlsxwriter') \n for dataframe, sheet in zip(df_list, sheet_list):\n dataframe.to_excel(writer, sheet_name=sheet, startrow=0 , startcol=0, index=False) \n writer.save()",
"def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])"
]
| [
"0.7105065",
"0.65541244",
"0.6157658",
"0.5867363",
"0.57439923",
"0.5577072",
"0.5558953",
"0.5527643",
"0.5488577",
"0.5479095",
"0.54620737",
"0.5350241",
"0.53132707",
"0.52870476",
"0.5283209",
"0.52702224",
"0.52511656",
"0.5249359",
"0.523218",
"0.51937866",
"0.51871544",
"0.51664996",
"0.5152379",
"0.5148728",
"0.5144692",
"0.5143804",
"0.5111886",
"0.5111265",
"0.5093326",
"0.50747645"
]
| 0.70902795 | 1 |
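The row above pairs the query with a thin CLI handler: export_tecan(args) simply forwards args.process_id, args.type and args.output_file to clarity_epp.export.tecan.samplesheet. Below is a minimal, self-contained sketch of how such a handler is typically wired into an argparse subcommand. Only the argument names (process_id, type, output_file) come from the row; the parser layout, the subcommand name and the stub body are illustrative assumptions, not the real clarity_epp CLI.

import argparse

def export_tecan(args):
    # Stand-in for clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)
    print(f"Tecan samplesheet for process {args.process_id} ({args.type}) -> {args.output_file}")

parser = argparse.ArgumentParser(prog="clarity_epp")
subparsers = parser.add_subparsers(dest="command", required=True)

tecan_parser = subparsers.add_parser("export_tecan", help="Export samplesheets for the Tecan machine")
tecan_parser.add_argument("process_id")
tecan_parser.add_argument("type")
tecan_parser.add_argument("output_file")
tecan_parser.set_defaults(func=export_tecan)

if __name__ == "__main__":
    args = parser.parse_args()
    args.func(args)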
Export workflow overview files. | def export_workflow(args):
if args.type == 'magnis':
clarity_epp.export.workflow.helix_magnis(lims, args.process_id, args.output_file)
elif args.type == 'mip':
clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_overview(self, outpath=None):\n orderby = self.orderby.get()\n currentregion = self.region.get()\n if not outpath:\n outpath = tkinter.filedialog.askdirectory()\n if outpath:\n export.export_overview(\n self.tabs.window.aistracker,\n self.tabs.window.nmeatracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def export_everything(self):\n orderby = self.orderby.get()\n currentregion = self.region.get()\n previoustext = self.tabs.window.statuslabel['text']\n res = tkinter.messagebox.askyesno(\n 'Export Everything',\n 'Exporting data on all AIS stations, this may take some time.')\n if res:\n outpath = tkinter.filedialog.askdirectory()\n if outpath:\n self.tabs.window.statuslabel.config(\n text='Exporting all AIS station data to - {}'.format(\n outpath),\n fg='black', bg='gold')\n self.update_idletasks()\n export.export_overview(\n self.tabs.window.aistracker,\n self.tabs.window.nmeatracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n export.export_everything(\n self.tabs.window.aistracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n self.tabs.window.statuslabel.config(\n text=previoustext, bg='light grey')\n else:\n raise ExportAborted(\n 'Export of all AIS data cancelled by user.')\n else:\n raise ExportAborted('Export of all AIS data cancelled by user.')",
"def export_to(short_name):\r\n (app, owner, n_tasks, n_task_runs,\r\n overall_progress, last_activity) = app_by_shortname(short_name)\r\n title = app_title(app, gettext(\"Export\"))\r\n loading_text = gettext(\"Exporting data..., this may take a while\")\r\n\r\n try:\r\n require.app.read(app)\r\n except HTTPException:\r\n if app.hidden:\r\n raise abort(403)\r\n else: # pragma: no cover\r\n raise\r\n\r\n def respond():\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n app=app,\r\n owner=owner)\r\n\r\n def gen_json(table):\r\n n = db.session.query(table)\\\r\n .filter_by(app_id=app.id).count()\r\n sep = \", \"\r\n yield \"[\"\r\n for i, tr in enumerate(db.session.query(table)\r\n .filter_by(app_id=app.id).yield_per(1), 1):\r\n item = json.dumps(tr.dictize())\r\n if (i == n):\r\n sep = \"\"\r\n yield item + sep\r\n yield \"]\"\r\n\r\n def format_csv_properly(row):\r\n keys = sorted(row.keys())\r\n values = []\r\n for k in keys:\r\n values.append(row[k])\r\n return values\r\n\r\n\r\n def handle_task(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def handle_task_run(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def get_csv(out, writer, table, handle_row):\r\n for tr in db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .yield_per(1):\r\n handle_row(writer, tr)\r\n yield out.getvalue()\r\n\r\n def respond_json(ty):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n try:\r\n table = tables[ty]\r\n except KeyError:\r\n return abort(404)\r\n return Response(gen_json(table), mimetype='application/json')\r\n\r\n def create_ckan_datastore(ckan, table, package_id):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n new_resource = ckan.resource_create(name=table,\r\n package_id=package_id)\r\n ckan.datastore_create(name=table,\r\n resource_id=new_resource['result']['id'])\r\n ckan.datastore_upsert(name=table,\r\n records=gen_json(tables[table]),\r\n resource_id=new_resource['result']['id'])\r\n\r\n def respond_ckan(ty):\r\n # First check if there is a package (dataset) in CKAN\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n msg_1 = gettext(\"Data exported to \")\r\n msg = msg_1 + \"%s ...\" % current_app.config['CKAN_URL']\r\n ckan = Ckan(url=current_app.config['CKAN_URL'],\r\n api_key=current_user.ckan_api)\r\n app_url = url_for('.details', short_name=app.short_name, _external=True)\r\n\r\n try:\r\n package, e = ckan.package_exists(name=app.short_name)\r\n if e:\r\n raise e\r\n if package:\r\n # Update the package\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_update(app=app, user=owner, url=app_url,\r\n resources=package['resources'])\r\n\r\n ckan.package = package\r\n resource_found = False\r\n for r in package['resources']:\r\n if r['name'] == ty:\r\n ckan.datastore_delete(name=ty, resource_id=r['id'])\r\n ckan.datastore_create(name=ty, resource_id=r['id'])\r\n ckan.datastore_upsert(name=ty,\r\n records=gen_json(tables[ty]),\r\n resource_id=r['id'])\r\n resource_found = True\r\n break\r\n if not resource_found:\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n else:\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_create(app=app, 
user=owner, url=app_url)\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n #new_resource = ckan.resource_create(name=ty,\r\n # package_id=package['id'])\r\n #ckan.datastore_create(name=ty,\r\n # resource_id=new_resource['result']['id'])\r\n #ckan.datastore_upsert(name=ty,\r\n # records=gen_json(tables[ty]),\r\n # resource_id=new_resource['result']['id'])\r\n flash(msg, 'success')\r\n return respond()\r\n except requests.exceptions.ConnectionError:\r\n msg = \"CKAN server seems to be down, try again layer or contact the CKAN admins\"\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n except Exception as inst:\r\n if len(inst.args) == 3:\r\n t, msg, status_code = inst.args\r\n msg = (\"Error: %s with status code: %s\" % (t, status_code))\r\n else: # pragma: no cover\r\n msg = (\"Error: %s\" % inst.args[0])\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n finally:\r\n return respond()\r\n\r\n def respond_csv(ty):\r\n # Export Task(/Runs) to CSV\r\n types = {\r\n \"task\": (\r\n model.task.Task, handle_task,\r\n (lambda x: True),\r\n gettext(\r\n \"Oops, the application does not have tasks to \\\r\n export, if you are the owner add some tasks\")),\r\n \"task_run\": (\r\n model.task_run.TaskRun, handle_task_run,\r\n (lambda x: type(x.info) == dict),\r\n gettext(\r\n \"Oops, there are no Task Runs yet to export, invite \\\r\n some users to participate\"))}\r\n try:\r\n table, handle_row, test, msg = types[ty]\r\n except KeyError:\r\n return abort(404)\r\n\r\n out = StringIO()\r\n writer = UnicodeWriter(out)\r\n t = db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .first()\r\n if t is not None:\r\n if test(t):\r\n writer.writerow(sorted(t.info.keys()))\r\n\r\n return Response(get_csv(out, writer, table, handle_row),\r\n mimetype='text/csv')\r\n else:\r\n flash(msg, 'info')\r\n return respond()\r\n\r\n export_formats = [\"json\", \"csv\"]\r\n if current_user.is_authenticated():\r\n if current_user.ckan_api:\r\n export_formats.append('ckan')\r\n\r\n ty = request.args.get('type')\r\n fmt = request.args.get('format')\r\n if not (fmt and ty):\r\n if len(request.args) >= 1:\r\n abort(404)\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n ckan_name=current_app.config.get('CKAN_NAME'),\r\n app=app,\r\n owner=owner)\r\n if fmt not in export_formats:\r\n abort(415)\r\n return {\"json\": respond_json, \"csv\": respond_csv, 'ckan': respond_ckan}[fmt](ty)",
"def print_workflow_summary(workflow_stats ):\n\t# status\n\tworkflow_stats.set_job_filter('nonsub')\n\t# Tasks\n\ttotal_tasks = workflow_stats.get_total_tasks_status()\n\ttotal_succeeded_tasks = workflow_stats.get_total_succeeded_tasks_status()\n\ttotal_failed_tasks = workflow_stats.get_total_failed_tasks_status()\n\ttotal_unsubmitted_tasks = total_tasks -(total_succeeded_tasks + total_failed_tasks)\n\ttotal_task_retries = workflow_stats.get_total_tasks_retries()\n\ttotal_invocations = total_succeeded_tasks + total_failed_tasks + total_task_retries\n\t# Jobs\n\ttotal_jobs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_jobs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_jobs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_jobs = total_jobs - (total_succeeded_jobs + total_failed_jobs )\n\ttotal_job_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_job_instance_retries = total_succeeded_jobs + total_failed_jobs + total_job_retries\n\t# Sub workflows\n\tworkflow_stats.set_job_filter('subwf')\n\ttotal_sub_wfs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_sub_wfs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_sub_wfs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_sub_wfs = total_sub_wfs - (total_succeeded_sub_wfs + total_failed_sub_wfs)\n\ttotal_sub_wfs_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_sub_wfs_tries = total_succeeded_sub_wfs + total_failed_sub_wfs + total_sub_wfs_retries\n\n\t# tasks\n\tsummary_str = \"\"\n\tsummary_str += \"total_succeeded_tasks: \" + convert_to_str(total_succeeded_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_tasks: \" + convert_to_str(total_failed_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_tasks: \" + convert_to_str(total_unsubmitted_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_tasks: \" + convert_to_str(total_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_task_retries: \" + convert_to_str(total_task_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_invocations: \" + convert_to_str(total_invocations)\n\tsummary_str += NEW_LINE_STR\n\n\n\tsummary_str += \"total_succeeded_jobs: \" + convert_to_str(total_succeeded_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_jobs: \" + convert_to_str(total_failed_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_jobs: \" + convert_to_str(total_unsubmitted_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_jobs:\" + convert_to_str(total_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_job_retries: \" + str(total_job_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_job_instance_retries:\" + convert_to_str(total_job_instance_retries)\n\tsummary_str += NEW_LINE_STR\n\n\n\tsummary_str += \"total_succeeded_sub_wfs: \" + convert_to_str(total_succeeded_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_sub_wfs: \" + convert_to_str(total_failed_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_sub_wfs: \" + convert_to_str(total_unsubmitted_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs: \" + convert_to_str(total_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs_retries: \" + str(total_sub_wfs_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs_tries: \" + 
convert_to_str(total_sub_wfs_tries)\n\tsummary_str += NEW_LINE_STR\n\n\tworkflow_states_list = workflow_stats.get_workflow_states()\n\tworkflow_wall_time = stats_utils.get_workflow_wall_time(workflow_states_list)\n\n\tif workflow_wall_time is None:\n\t\tsummary_str += \"workflow_runtime: -\"\n\telse:\n\t\tsummary_str += \"workflow_runtime: %-20s (total %d seconds)\" % \\\n\t\t\t\t(format_seconds(workflow_wall_time), (workflow_wall_time))\n\tsummary_str += NEW_LINE_STR\n\tworkflow_cum_job_wall_time = workflow_stats.get_workflow_cum_job_wall_time()[0]\n\tif workflow_cum_job_wall_time is None:\n\t\tsummary_str += \"cumulative_workflow_runtime_kickstart: -\"\n\telse:\n\t\tsummary_str += \"cumulative_workflow_runtime_kickstart: %-20s (total %d seconds)\" % \\\n\t\t\t(format_seconds(workflow_cum_job_wall_time),workflow_cum_job_wall_time)\n\tsummary_str += NEW_LINE_STR\n\tsubmit_side_job_wall_time = workflow_stats.get_submit_side_job_wall_time()[0]\n\tif submit_side_job_wall_time is None:\n\t\tsummary_str += \"cumulative_workflow_runtime_dagman: -\"\n\telse:\n\t\tsummary_str += \"cumulative_workflow_runtime_dagman: %-20s (total %d seconds)\" % \\\n\t\t\t(format_seconds(submit_side_job_wall_time), submit_side_job_wall_time)\n\treturn summary_str",
"def export(self):\n # check that session state is exportable\n if self.general_parser is None or \\\n self.specific_parser is None or \\\n ((self.audio_parser is None) and (self.audio_data_parser is None)) or \\\n ((self.video_parser is None) and (self.video_data_parser is None)) or \\\n self.month_selected is None or \\\n self.unique_audio_found is False or \\\n self.unique_video_found is False:\n\n self.cant_export_label.grid(row=13, column=6, columnspan=1, rowspan=5)\n raise Exception(\"you need to load the general, \"\n \"month-specific, \"\n \"audio and video words first\")\n\n self.cant_export_label.grid_remove()\n export_file = tkFileDialog.asksaveasfilename() # ask for output file\n\n unique_words = self.top_unique_audio + self.top_unique_video # concatenate\n unique_words = sorted(unique_words, key=self.get_count_from_rank, reverse=True) # sort\n\n with open(export_file, \"w\") as file:\n file.write(\"\\\"rank\\\" \\\"source\\\" \\\"word\\\" \\\"in_general\\\" \\\"count\\\"\\n\\n\") # print header\n\n for rank in self.top_unique_audio:\n\n for entry in rank:\n\n file.write(str(entry.rank) + \" audio \" +\n entry.word + \" \" +\n str(entry.in_general) + \" \" +\n str(entry.count) + \"\\n\")\n\n for rank in self.top_unique_video:\n\n for entry in rank:\n\n file.write(str(entry.rank) + \" video \" +\n entry.word + \" \" +\n str(entry.in_general) + \" \" +\n str(entry.count) + \"\\n\")",
"def export_files(self):\n if self.tabs.window.serverrunning:\n tkinter.messagebox.showwarning(\n 'WARNING', 'Cannot export files whilst server is running')\n elif self.tabs.window.aistracker.messagesprocessed == 0:\n tkinter.messagebox.showwarning(\n 'WARNING', 'Nothing to export.')\n else:\n commands = {'OVERVIEW': self.export_overview,\n 'EVERYTHING': self.export_everything,\n 'CSV': self.export_csv,\n 'TSV': self.export_tsv,\n 'KML': self.export_kml,\n 'KMZ': self.export_kmz,\n 'JSON': self.export_json,\n 'VERBOSE JSON': self.export_verbose_json,\n 'GEOJSON': self.export_geojson,\n 'AIS MESSAGES (DEBUG)': self.export_debug}\n option = self.exportoptions.get()\n try:\n commands[option]()\n tkinter.messagebox.showinfo(\n 'Export Files', 'Export Successful')\n except Exception as err:\n AISLOGGER.exception('export error')\n tkinter.messagebox.showerror(type(err).__name__, str(err))",
"def print_individual_workflow_stats(workflow_stats , title):\n\tcontent_str =\"<table class ='gallery_table'>\"\n\t# individual workflow status\n\n\t# workflow status\n\tworkflow_stats.set_job_filter('all')\n\ttotal_wf_retries = workflow_stats.get_workflow_retries()\n\tcontent = [title,convert_to_str(total_wf_retries) ]\n\ttitle_col_span = len(worklow_status_col_name) -1\n\tcontent_str += print_row(worklow_status_col_name, True)\n\twf_status_str = \"\"\"<tr><td colspan =\"\"\"+ str(title_col_span) + \"\"\" > \"\"\" + title + \"\"\"</td><td>\"\"\" + convert_to_str(total_wf_retries) +\"\"\"</td></tr>\"\"\"\n\n\t#tasks\n\tworkflow_stats.set_job_filter('nonsub')\n\ttotal_tasks = workflow_stats.get_total_tasks_status()\n\ttotal_succeeded_tasks = workflow_stats.get_total_succeeded_tasks_status()\n\ttotal_failed_tasks = workflow_stats.get_total_failed_tasks_status()\n\ttotal_unsubmitted_tasks = total_tasks -(total_succeeded_tasks + total_failed_tasks )\n\ttotal_task_retries = workflow_stats.get_total_tasks_retries()\n\ttotal_task_invocations = total_succeeded_tasks + total_failed_tasks + total_task_retries\n\tcontent =[\" \",\"Tasks\", convert_to_str(total_succeeded_tasks) , convert_to_str(total_failed_tasks), convert_to_str(total_unsubmitted_tasks) , convert_to_str(total_tasks) ,\" \",convert_to_str(total_task_retries), convert_to_str(total_task_invocations) ,\" \"]\n\ttasks_status_str = print_row(content)\n\n\t# job status\n\tworkflow_stats.set_job_filter('nonsub')\n\ttotal_jobs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_jobs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_jobs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_jobs = total_jobs - (total_succeeded_jobs + total_failed_jobs )\n\ttotal_job_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_job_invocations = total_succeeded_jobs + total_failed_jobs + total_job_retries\n\tcontent = [\" \",\"Jobs\",convert_to_str(total_succeeded_jobs), convert_to_str(total_failed_jobs) , convert_to_str(total_unsubmitted_jobs), convert_to_str(total_jobs) ,\" \",convert_to_str(total_job_retries), convert_to_str(total_job_invocations) ,\" \" ]\n\tjobs_status_str = print_row(content)\n\n\t# sub workflow\n\tworkflow_stats.set_job_filter('subwf')\n\ttotal_sub_wfs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_sub_wfs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_sub_wfs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_sub_wfs = total_sub_wfs - (total_succeeded_sub_wfs + total_failed_sub_wfs )\n\ttotal_sub_wfs_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_sub_wfs_invocations = total_succeeded_sub_wfs + total_failed_sub_wfs + total_sub_wfs_retries\n\tcontent = [\" \",\"Sub Workflows\",convert_to_str(total_succeeded_sub_wfs), convert_to_str(total_failed_sub_wfs) , convert_to_str(total_unsubmitted_sub_wfs), convert_to_str(total_sub_wfs) ,\" \",convert_to_str(total_sub_wfs_retries), convert_to_str(total_sub_wfs_invocations) ,\" \" ]\n\tsub_wf_status_str = print_row(content)\n\n\n\tcontent_str += wf_status_str +\"\\n\"\n\tcontent_str += tasks_status_str +\"\\n\"\n\tcontent_str += jobs_status_str +\"\\n\"\n\tcontent_str += sub_wf_status_str +\"\\n\"\n\tcontent_str +=\"</table>\"\n\treturn content_str",
"def test_export(self):\n structure = {\n \"README.rst\": \"Hi this is 1.0.0.\",\n \"twisted\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted\", 1, 0, 0),\n \"web\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted.web\", 1, 0, 0),\n },\n },\n }\n reposDir = self.makeRepository(self.tmpDir)\n self.createStructure(reposDir, structure)\n self.commitRepository(reposDir)\n\n exportDir = FilePath(self.mktemp()).child(\"export\")\n self.createCommand.exportTo(reposDir, exportDir)\n self.assertStructure(exportDir, structure)",
"def get(self) :\n self.generate('export.html', {\n 'xml' : export(),\n 'title' : \"Admin Export\"})",
"def describe_export_tasks(exportIds=None, filters=None, maxResults=None, nextToken=None):\n pass",
"def view_index(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n # Get the views\n views = workflow.views.values(\n 'id',\n 'name',\n 'description_text',\n 'modified')\n\n # Build the table only if there is anything to show (prevent empty table)\n return render(\n request,\n 'table/view_index.html',\n {\n 'query_builder_ops': workflow.get_query_builder_ops_as_str(),\n 'table': ViewTable(views, orderable=False),\n },\n )",
"def export_notebook():\n #system(\"jupyter nbconvert --to HTML \\\"Look At Enron data set.ipynb\\\"\")\n system(\"jupyter nbconvert --to HTML --output=Look+At+Enron+data+set.html \\\"Look At Enron data set.ipynb\\\"\")\n return",
"def _on_collections_export(self, evt=None):\n \n # remove old exports\n for name in os.listdir(self._library.library_path):\n if EXPORT_PATTERN.match(name):\n os.remove(os.path.join(self._library.library_path, name))\n \n # get collections\n collections = self._library.search(core.Query(\"\", core.Collection.NAME))\n collections = [c for c in collections if c.export]\n \n # export collections\n for collection in collections:\n \n # get query\n if collection.query:\n query = core.Query(collection.query, core.Article.NAME)\n else:\n query = core.Query(\"%s[COLLECTIONID]\" % collection.dbid, core.Article.NAME)\n \n # get articles\n articles = self._library.search(query)\n \n # make export\n text = \"\"\n for article in articles:\n text += article.format(\"PDF: [PDF]\\n[TI]\\n[AU]\\n[CI]\\n\\n\")\n \n # init filename and path\n filename = \"_export_\"\n filename += collection.title.replace(\" \", \"_\")\n filename += \".txt\"\n path = os.path.join(self._library.library_path, filename)\n \n # save to file\n with open(path, 'w', encoding=\"utf-8\") as export:\n export.write(text)",
"def export_results(path: str):\n _, ext = Utils.get_filename_ext(path)\n\n if ext not in [\".md\", \".json\", \".csv\"]:\n click.echo(\"Output file must be of type markdown, csv or json.\\n\", err=True)\n return\n\n with open(path, \"w+\") as out:\n Exporter.write(Format(Utils.format_to_int(ext)), out)\n\n click.echo(f\"Successfully exported results to {path}\\n\")",
"def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')",
"def dashboard_workflow(self):\n # copy html/js/css templates to the workflow specific directory\n js_dir = Path(__file__).absolute().parent / \"dashboard_template\"\n for js_template in [\"dashboard.js\", \"index.html\", \"style.css\"]:\n shutil.copy2(js_dir / js_template, self.working_dir)",
"def export(self):\n memento = self.create_memento()\n try:\n f = open(\"story.txt\", \"w\")\n try:\n f.write(memento.__str__())\n finally:\n f.close()\n except IOError:\n print 'IOError while exporting story!'",
"def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst",
"def export_wiki(export_dir, at_once):\r\n\r\n namespaces = get_namespaces()\r\n\r\n page_names = []\r\n for ns in namespaces:\r\n page_names.extend(get_all_pages_for_namespace(ns))\r\n\r\n print 'Full list of pages:'\r\n pprint(page_names)\r\n\r\n if at_once:\r\n export_filenames = export_at_once(export_dir, page_names)\r\n else:\r\n export_filenames = export_page_at_time(export_dir, page_names)\r\n\r\n return export_filenames",
"def create_workflow_file(self, workflow: Workflow, props: PropertySet):",
"def download(cls):\n cls._check_folder()\n os.chdir(cls.VIEWS_PATH)\n # iterate documents\n for doc in cls._documents:\n design_doc = doc().view()\n if design_doc is None:\n continue\n bucket_name = design_doc.bucket.name\n # iterate viewtypes (i.e. spatial and views)\n for view_type, views in design_doc.ddoc.iteritems():\n save_dir = '%s/%s/%s' % (bucket_name, design_doc.name, view_type)\n try:\n # remove and recreate the dir\n shutil.rmtree(save_dir, ignore_errors=True)\n os.makedirs(save_dir)\n except OSError:\n pass\n for name, view in views.iteritems():\n if isinstance(view, unicode) and view_type=='spatial':\n spatial_file = '%s/%s.spatial.js' % (save_dir, name)\n with open(spatial_file, 'w') as f:\n f.write(view)\n print 'Downloaded: %s' % spatial_file\n if isinstance(view, dict) and 'map' in view:\n map_file = '%s/%s.map.js' % (save_dir, name)\n with open(map_file, 'w') as f:\n f.write(view['map'])\n print 'Downloaded: %s' % map_file\n if isinstance(view, dict) and 'reduce' in view:\n reduce_file = '%s/%s.reduce.js' % (save_dir, name)\n with open(reduce_file, 'w') as f:\n f.write(view['reduce'])\n print 'Downloaded: %s' % reduce_file\n pass",
"def actioncluster_export(request, slug):\n actioncluster = get_object_or_404(ActionCluster.active, slug__exact=slug)\n if not actioncluster.has_member(request.user):\n raise Http404\n context = {\n 'object': actioncluster,\n 'url_list': actioncluster.actionclusterurl_set.all(),\n 'image_list': actioncluster.actionclustermedia_set.all(),\n 'feature_list': actioncluster.features.all(),\n 'member_list': actioncluster.members.select_related('profile').all(),\n\n }\n content = render_to_string('actionclusters/export.txt', context)\n response = HttpResponse(content, content_type='text/plain')\n filename = '%s-export-%s' % (\n actioncluster.slug, timezone.now().strftime(\"%Y%m%d-%H%M%S\"))\n response['Content-Disposition'] = (\n 'attachment; filename=\"%s.txt\"' % filename)\n response['Content-Length'] = len(response.content)\n return response",
"def export(format, output, config):\n config = read_config(config)\n changelogs = get_changelogs(config, tracked=True)\n\n fields = ('namespace', 'name', 'source')\n\n def extract_fields(item):\n return [item.get(key)\n for key in fields]\n\n data = map(extract_fields, changelogs)\n table = tablib.Dataset(*data)\n table.headers = fields\n data = getattr(table, format)\n if output:\n with open(output, 'wb') as f:\n f.write(data)\n else:\n click.echo(data)",
"def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))",
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def task_render():\n target = 'analysis.html'\n dep = 'analysis.ipynb'\n return {\n 'file_dep': [dep],\n 'targets': [target],\n 'actions': [\n f\"jupyter nbconvert --execute --to html {dep}\"\n ],\n 'clean': True\n }",
"def list(self):\n\t\tendpoint = \"/api/walrus/project/\" + self.client.project_string_id + \\\n\t\t\t \"/export/working_dir/list\"\n\n\t\tresponse = self.client.session.get(self.client.host + endpoint)\n\n\t\tself.client.handle_errors(response)\n\n\t\texport_list_json = response.json().get('export_list')\n\t\texport_list = []\n\n\t\tif export_list_json:\n\t\t\tfor export_json in export_list_json:\n\t\t\t\texport_list.append(self.new(export_json))\n\n\t\treturn export_list",
"def _go_through_summary_reports(self):\n\n for result_file in self.result_files:\n self.cur_8digit_dir = os.path.split(result_file)[0]\n try:\n with open(result_file) as f_in:\n sum_rep = json.load(f_in)\n if sum_rep.has_key('Artifacts'):\n for linked_artifact in sum_rep['Artifacts']:\n artifact_path = linked_artifact['Path']\n # For now assume only files are linked (no folders)\n rel_path_from_results = os.path.join(self.cur_8digit_dir, artifact_path)\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results',\n rel_path_from_results))\n if artifact_path.endswith('.json'):\n function_tag = artifact_path.replace('.','_').replace('/','_')\n\n if hasattr(self, function_tag):\n getattr(self, function_tag)()\n except IOError:\n print '{0} does not exist on this filesystem. I cannot be check for references '\\\n 'to other files.'.format(result_file)",
"def export_to_gsas():\n # Get workflow\n work_flow = my_data.get()\n\n output_file_name = '/tmp/acceptance_test.gda'\n\n # Clear the file if it exists.\n if os.path.exists(output_file_name):\n os.remove(output_file_name)\n\n status = work_flow.export_gsas_file(run_number=80231)\n assert status\n assert os.path.exists(output_file_name)",
"def export_representations(self):\n\n dbpath, config = self._start()\n self.logger.msg1(\"Loading ontology\")\n obo_path = check_file(config.obo, dbpath, \"obo\")\n self.obo = MinimalObo(obo_path, True)\n self._export_reference_representations()\n self._export_model_representations(config)\n self._end()"
]
| [
"0.649091",
"0.5712518",
"0.56824404",
"0.5679802",
"0.5617292",
"0.557171",
"0.55455595",
"0.54952663",
"0.5478947",
"0.54045343",
"0.536965",
"0.5348399",
"0.5322129",
"0.5303039",
"0.52954066",
"0.5293154",
"0.5290681",
"0.5270537",
"0.52693564",
"0.52677786",
"0.52497315",
"0.5238358",
"0.52002335",
"0.51990944",
"0.51922464",
"0.5176107",
"0.51545215",
"0.5149916",
"0.5132143",
"0.5131843"
]
| 0.6118105 | 1 |
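The export_workflow handler in the row above chooses between helix_magnis and helix_mip with an if/elif on args.type. A table-driven sketch of the same dispatch is shown below; the type values 'magnis' and 'mip' come from the row, while the placeholder lambdas and the example process id and file name are made up for illustration.

from argparse import Namespace

def export_workflow(args):
    # Table-driven equivalent of the if/elif dispatch shown in the row above.
    # The lambdas stand in for clarity_epp.export.workflow.helix_magnis / helix_mip.
    handlers = {
        "magnis": lambda: print(f"magnis workflow for {args.process_id} -> {args.output_file}"),
        "mip": lambda: print(f"mip workflow for {args.process_id} -> {args.output_file}"),
    }
    try:
        handlers[args.type]()
    except KeyError:
        raise SystemExit(f"unknown workflow type: {args.type}")

# Example invocation; the process id and file name are hypothetical values.
export_workflow(Namespace(type="mip", process_id="24-12345", output_file="workflow.txt"))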
Upload samples from a Helix output file. | def upload_samples(args):
clarity_epp.upload.samples.from_helix(lims, config.email, args.input_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_samples():\n # Retrieve a list of all files and paths within the target\n paths = Path(Config.target_dir).glob(Config.target_pattern)\n # Inform the user as to what we're doing\n logger.info(\"Assembling %s volume for submission\", Config.target_dir)\n # Loop through each identified file and upload it to the sandbox for analysis\n for path in paths:\n # Convert the path to a string\n filepath = str(path)\n # Grab the file name\n filename = os.path.basename(filepath)\n # Open the file for binary read, this will be our payload\n with open(filepath, 'rb') as upload_file:\n payload = upload_file.read()\n # Upload the file using the Sandbox\n response = Samples.upload_sample(file_name=filename, sample=payload)\n # Grab the SHA256 unique identifier for the file we just uploaded\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the volume payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we can remove the file when we're done\n Analyzer.files.append([filename, filepath, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)",
"def upload_sample(self, file):\n return self._upload_sample(file)",
"def upload_samples(self, file):\n result = self._upload_sample(file)\n if \"samples\" in result:\n return result[\"samples\"]\n else:\n return [result]",
"def _upload_samples(self, samples):\n # Iterate over the full set of provided samples, uploading them in chunks.\n for offset in range(0, len(samples), self.upload_chunk_size):\n chunk = samples[offset:offset + self.upload_chunk_size]\n self.api.upload_samples(offset, chunk)",
"def sample(config, samples):\n url = get_api_path('sample.json')\n multiple_files = []\n images = [s['image'] for s in samples]\n labels = [s['label'] for s in samples]\n for image in images:\n multiple_files.append(('images', (image, open(image, 'rb'), 'image/png')))\n headers=get_headers(no_content_type=True)\n headers[\"config\"]= json.dumps(config, cls=HCEncoder)\n headers[\"labels\"]= json.dumps(labels)\n\n try:\n r = requests.post(url, files=multiple_files, headers=headers, timeout=30)\n return r.text\n except requests.exceptions.RequestException:\n e = sys.exc_info()[0]\n print(\"Error while calling hyperchamber - \", e)\n return None",
"def write_inputs(sampling_data):\n datanames = sampling_data.dtype.names\n tarputs = tarfile.open('smpl_mcnp_depl_inps.tar', 'w')\n for num, sample in enumerate(sampling_data):\n input = HomogeneousInput(sample['core_r'],\n sample['core_r']*sample['AR'],\n sample['power'])\n homog_comp = input.homog_core(sample['enrich'],\n sample['cool_r'],\n sample['PD'])\n input.write_mat_string(homog_comp)\n \n # identifying header string for post-processing\n header_str = ''\n for param in dimensions:\n header_str += str(round(sample[param], 5)) + ','\n # write the input and tar it\n filename = input.write_input(num, header_str)\n tarputs.add(filename)\n\n # write HTC input list\n htc_inputs = open('input_list.txt', 'w')\n htc_inputs.write('\\n'.join(glob.glob(\"*.i\")))\n htc_inputs.close()\n \n tarputs.add('input_list.txt')\n tarputs.close()",
"def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()",
"def upload_file(self, file_path, file_name, output_path):",
"def upload(self, data: dict, replace: bool = False):\n for sample_data in data[\"samples\"]:\n chanjo_sample = self.chanjo_api.sample(sample_data[\"sample\"])\n if chanjo_sample and replace:\n self.chanjo_api.delete_sample(sample_data[\"sample\"])\n elif chanjo_sample:\n LOG.warning(\"sample already loaded, skipping: %s\", sample_data[\"sample\"])\n continue\n\n LOG.debug(\"upload coverage for sample: %s\", sample_data[\"sample\"])\n self.chanjo_api.upload(\n sample_id=sample_data[\"sample\"],\n sample_name=sample_data[\"sample_name\"],\n group_id=data[\"family\"],\n group_name=data[\"family_name\"],\n bed_file=sample_data[\"coverage\"],\n )",
"def _synth_output(self, path, files):\n features = np.empty((0, 6))\n for i in range(len(files)):\n train_set = np.load(f'{path}angles/{files[i]}.npy')\n features = np.concatenate((features, train_set), axis=0)\n self.output = F.normalize(torch.tensor(np.array(features[:, :5]), dtype=torch.float32))",
"def serialize_samples(self, writer:h5py.File, data_file:str, label_file:str):\n \n raise NotImplementedError('Method not implemented!')",
"def save_and_upload_cohort_all_samples(all_samples, name, namespace, workspace, blacklist=[]):\n df = all_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})\n df['membership:sample_set_id'] = name\n\n # Re-arrange columns\n cols = ['membership:sample_set_id', 'sample_id']\n df = df[cols]\n\n # Blacklist\n df = df[ ~df['sample_id'].isin(blacklist) ]\n df.to_csv('all_samples/fc_upload_%s.txt'%name, index=None, sep=\"\\t\")\n res = upload_entities_from_tsv(namespace, workspace, 'all_samples/fc_upload_%s.txt'%name)\n return res",
"def samples(self):\n pass",
"def ingest_data(self, input_file, num_bands, labels):\n self.labels = labels\n self.num_bands = num_bands\n with rasterio.open(input_file, \"r\") as dataset:\n for i in range(1, self.num_bands + 1):\n band = dataset.read(i)\n self.bands[self.labels[i - 1]] = band",
"def samples(self, samples):\n\n self._samples = samples",
"def _synth_input(self, path, files):\n features = np.empty((0, 15))\n for i in range(len(files)):\n train_set = np.load(f'{path}coords/{files[i]}.npy')\n train_set = train_set.reshape((train_set.shape[0], -1))\n features = np.concatenate((features, train_set), axis=0)\n self.input_ = F.normalize(torch.tensor(np.array(features), dtype=torch.float32))",
"def train(self, trainfile):",
"def save_samples(samples, output_prefix=\"sample\"):\n\n for (i, vertices) in enumerate(samples):\n vertex_fname = \"{pref}{i}_vertices.ply\".format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == \"\":\n vertex_fname = \"./\" + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)",
"def _upload_sample_data(self, sample_id):\n run_elements = []\n\n for lane in range(1, 8):\n run_elements.append({\n 'run_id': self.run_id, 'project_id': self.project_id, 'sample_id': sample_id,\n 'library_id': self.library_id, 'run_element_id': '%s_%s_%s' % (self.run_id, lane, sample_id),\n 'useable': 'yes', 'barcode': self.barcode, 'lane': lane, 'bases_r1': 20000000000,\n 'bases_r2': 20000000000, 'clean_bases_r1': 18000000000, 'clean_bases_r2': 18000000000,\n 'q30_bases_r1': 19000000000, 'q30_bases_r2': 19000000000, 'clean_q30_bases_r1': 17000000000,\n 'clean_q30_bases_r2': 17000000000, 'clean_reads': 1\n })\n # Lane 8 has no data\n run_elements.append(\n {'run_id': self.run_id, 'project_id': self.project_id, 'sample_id': sample_id,\n 'library_id': self.library_id, 'run_element_id': '%s_%s_%s' % (self.run_id, 8, sample_id),\n 'useable': 'no', 'barcode': self.barcode, 'lane': 8, 'bases_r1': 0, 'bases_r2': 0,\n 'clean_bases_r1': 0, 'clean_bases_r2': 0, 'q30_bases_r1': 0,\n 'q30_bases_r2': 0, 'clean_q30_bases_r1': 0, 'clean_q30_bases_r2': 0,\n 'clean_reads': 0}\n )\n for e in run_elements:\n rest_communication.post_entry('run_elements', e)\n\n rest_communication.post_entry(\n 'samples',\n {'library_id': self.library_id, 'project_id': self.project_id, 'sample_id': sample_id,\n 'run_elements': [e['run_element_id'] for e in run_elements],\n 'required_coverage': 30, 'required_yield': 120000000000}\n )\n rest_communication.post_entry('projects', {'project_id': self.project_id, 'samples': [sample_id]})",
"def upload_sample(a1000):\n file_entry = demisto.getFilePath(demisto.getArg('entryId'))\n\n try:\n with open(file_entry['path'], 'rb') as f:\n response_json = a1000.upload_sample_from_file(f,\n custom_filename=file_entry.get('name'),\n tags=demisto.getArg('tags'),\n comment=demisto.getArg('comment')).json()\n except Exception as e:\n return_error(str(e))\n\n markdown = f'''## ReversingLabs A1000 upload sample\\n **Message:** {response_json.get('message')}\n **ID:** {demisto.get(response_json, 'detail.id')}\n **SHA1:** {demisto.get(response_json, 'detail.sha1')}\n **Created:** {demisto.get(response_json, 'detail.created')}'''\n\n command_result = CommandResults(\n outputs_prefix='ReversingLabs',\n outputs={'a1000_upload_report': response_json},\n readable_output=markdown\n )\n\n file_result = fileResult('Upload sample report file', json.dumps(response_json, indent=4),\n file_type=EntryType.ENTRY_INFO_FILE)\n\n return [command_result, file_result]",
"def main():\n parser = argparse.ArgumentParser(description='ivector runner')\n parser.add_argument('--wav-dir', required=True,\n help='directory to original audio files')\n parser.add_argument('--label-file', required=True,\n help='label files')\n parser.add_argument('--out-dir', required=True,\n help='output directory to data')\n args = parser.parse_args()\n\n tmp_dir = os.path.join(args.out_dir, \"tmp\")\n utt2spk_path = os.path.join(args.out_dir, \"utt2spk\")\n wavscp_path = os.path.join(args.out_dir, \"wav.scp\")\n\n if (os.system(\"mkdir -p %s\" % (tmp_dir)) != 0):\n print(\"Error making directory %s\" % (tmp_dir))\n\n # refer to the label and create utt2spk, wav.scp\n f_utt2spk = open(utt2spk_path, 'w')\n f_wavscp = open(wavscp_path, 'w')\n f_label = open(args.label_file, 'r')\n for line in f_label:\n items = line.strip().split(' ')\n wav_file_path = os.path.join(args.wav_dir, items[0] + '.wav')\n if os.path.isfile(wav_file_path):\n f_utt2spk.write(items[1] + '_' + line)\n f_wavscp.write(items[1] + '_' + items[0] + ' ' + wav_file_path + '\\n')\n else:\n raise FileExistsError(\"wav file does not exist: %s\" % wav_file_path)\n f_label.close()\n f_utt2spk.close()\n f_wavscp.close()",
"def upload_sample(host, port, path, read_type='protobuf'):\n with Connection.connect(host, port) as conn:\n zipped = (path.endswith(\".gz\"))\n r = Reader(path, read_type, zipped)\n hello = r.read_hello()\n hello_bytes = hello.SerializeToString()\n num_snapshot = 1\n for snap in r:\n send_hello(conn, hello_bytes)\n config_bytes = get_config(conn)\n config = Config.deserialize(config_bytes)\n filter_snapshot(snap, config)\n send_snapshot(conn, snap.SerializeToString())\n num_snapshot += 1",
"def save_and_upload_samples(data, namespace, workspace, tsca_id):\n os.system('mkdir -p %s'%tsca_id)\n filename = '%s/fc_upload_samples_tsca_%s.txt' % (tsca_id, tsca_id)\n data.to_csv(filename, sep='\\t', index=False)\n res = upload_entities_from_tsv(namespace, workspace, filename)\n return res",
"def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)",
"def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def test_upload_voice_dataset(self):\n pass",
"def gather_sample(self, my_file, collector=None):\n\n pass",
"def sample_input(self, loader, is_test=False):\n pass"
]
| [
"0.6146297",
"0.6064363",
"0.60513383",
"0.5952946",
"0.59492606",
"0.593849",
"0.58671814",
"0.5859735",
"0.57725275",
"0.5753233",
"0.57029307",
"0.56819546",
"0.561137",
"0.5601123",
"0.5592826",
"0.55898684",
"0.5573676",
"0.55560964",
"0.55062276",
"0.5494329",
"0.54717195",
"0.54649246",
"0.54566187",
"0.5438997",
"0.54329467",
"0.5370679",
"0.5370679",
"0.53698206",
"0.53662187",
"0.5342146"
]
| 0.77155775 | 0 |
Set QC status based on fragment length measurement. | def qc_fragment_length(args):
clarity_epp.qc.fragment_length.set_qc_flag(lims, args.process_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length",
"def test_one_contig_threshold(self):\n # Make sure the error code is not set before running command\n assert_false(hasattr(self,\"c\"))\n # Longest contig is 33356 so we put the threshold just below\n self.run_command(tags=[\"--length_threshold 33350\"])\n # The command should have failed with code 255\n assert_true(hasattr(self,\"c\"))\n assert_equal(self.c,255)",
"def length_changed(self):\n\t\tprint \"length changed\"\n\t\tlength_cbox = self.ui.findChild(QWidget, \"length_cbox\")\t\n\t\tnew_length = length_cbox.currentText()\n\t\t# todo: calculate new values",
"def _update_status(self):\n if any([abs(v) > LIMITS[i] for i, v in enumerate(self.state)]):\n self.terminal = True\n elif abs(self.q[3]) < LIMITS[9]:\n self.terminal = True\n elif self.steps + 1 >= self.max_steps:\n self.terminal = True",
"def setupStatus(start, end):\n global startsector, sectorlen, readsector, progressbar\n startsector = start\n sectorlen = end - start\n readsector = start\n progressbar = [\" \" for i in range(0,30)]\n return",
"def set_QUALITY(self,newQual):\n\t\tself.QUALITY = newQual",
"def _setVals(self, cmd_length=0):\n self.cmd_length = cmd_length",
"def _setVals(self, tp: CQCType = 0, length: int = 0) -> None:\n self.type = tp\n self.length = length",
"def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length",
"async def gpt2_set_length(self, ctx, *, arg=None):\n print('Command gpt2_set_length triggered')\n if arg:\n try:\n i = int(arg)\n assert (i > 0) and (i < 1024)\n except ValueError or AssertionError:\n ctx.send(\"ERROR: Argument must be a positive integer number\")\n self.update_config(length=arg)\n else:\n await ctx.send(\"ERROR: Argument required\")",
"def set_status(self):\r\n string = \"%9.3f%s/%9.3f%s\"\r\n unit1 = unit2 = \"b\"\r\n used = self.usedBytes.get()\r\n total = self.totalBytes.get()\r\n if used > total:\r\n self.label.config(fg=\"red\")\r\n else:\r\n self.label.config(fg=\"black\")\r\n if used > 999999:\r\n unit1 = \"Mb\"\r\n used /= 1000000.0\r\n elif used > 999:\r\n unit1 = \"Kb\"\r\n used /= 1000.0\r\n if total > 999999:\r\n unit2 = \"Mb\"\r\n total /= 1000000.0\r\n elif total > 999:\r\n unit2 = \"Kb\"\r\n total /= 1000.0\r\n self.textStatus.set(string % (used, unit1, total, unit2))",
"def setMyStatus(self):\n self.clearMyStatus()\n self.mass = self.myShipHull.mass\n for position, myQuad in self.quads.iteritems():\n self.maxBattery += myQuad.maxBattery\n self.currentPower += myQuad.maxPower\n self.thrust += myQuad.thrust\n self.rotation += myQuad.rotation\n self.radar += myQuad.radar\n self.jamming += myQuad.jamming\n self.repair += myQuad.repair\n self.mass += myQuad.mass\n self.maxAssault += myQuad.maxAssault\n\n # scale back attributes if internal structure has been hit\n ratio = self.currentISP/self.myShipHull.maxISP\n self.currentPower = self.currentPower * ratio\n self.thrust = self.thrust * ratio\n self.rotation = self.rotation * ratio\n\n self.accel = self.myDesign.getAccel(self.thrust, self.mass)\n self.accel = self.accel\n\n self.rotation = self.myDesign.getRotation(self.rotation, self.mass)\n self.rotation = self.rotation\n self.setMyStrength()\n self.setWeaponStatus()\n self.setRange()\n self.setAssaultStrength(ratio)",
"def load_qc(self):\n rejectval = self.config.getint('profiles', 'qcreject') \n self.qc = self.read_var(self.qcvar)\n self.qc = self.qc.astype('unicode')\n self.qc[self.qc == '1'] = 1\n self.qc[np.logical_or(self.qc=='4', self.qc == '0')] = 0\n self.qc = self.qc.astype(bool)\n self.test_shape(self.qcvar, self.qc.shape, 2)",
"def set_progress(self, progress: float):",
"def increment(self, length):\r\n self.progress_bar.update(length)",
"def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)",
"def setLength(self, new_length):\n\n self.length = new_length",
"def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...",
"async def set_status(ctx, *, status: str):\n if len(status) > 128:\n await ctx.send(\"This status is too long! (128 character limit)\")\n else:\n Config.set_status(status)\n await ctx.send(\"Successfully updated status\")",
"def _on_len_change(self, event=None):\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)",
"def setMyStatus(self):\n self.clearMyStatus()\n for id, myComponent in self.components.iteritems():\n self.currentComps += 1\n if myComponent.type != '':\n compData = self.componentdata[myComponent.type]\n if myComponent.currentHP == myComponent.myComponentData.maxHP:\n # regular component set quad Attributes\n if compData.typeAP != '':\n self.typeAP = compData.typeAP\n elif compData.assault > 0:\n self.maxAssault += compData.assault\n self.maxAP += compData.maxAP\n self.maxSP += compData.maxSP\n self.genSP += compData.genSP\n self.maxPower += compData.power\n self.maxBattery += compData.battery\n self.thrust += compData.engine\n self.rotation += compData.rotate\n self.radar += compData.radar\n self.jamming += compData.jamming\n self.repair += compData.repair\n self.target += compData.target\n self.mass += compData.mass\n \n # tell weapons in quad to recalc their status\n for id, myWeapon in self.weapons.iteritems():\n myWeapon.setMyStatus()",
"def test_set_scan_status(self):\n pass",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def qc_qubit(args):\n clarity_epp.qc.qubit.set_qc_flag(lims, args.process_id)",
"def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return",
"def set_reduction_status(self, status, message, chopped_data):\n # check input\n assert isinstance(\n status, bool), 'Reduction status must be given by bool but not {0}'.format(type(status))\n assert isinstance(message, str), 'Reduction message {0} must be string but not {1}' \\\n ''.format(message, type(message))\n assert isinstance(chopped_data, bool), 'Flag for being chopped run must be boolean but not {0}' \\\n ''.format(type(chopped_data))\n\n self._reductionStatus = status\n self._reductionInformation = message\n self._isChopped = chopped_data\n\n return",
"def __measurement_mode(self):\n self.__measurement_modes = {\"DISCRETE\": 120, \"BATCH\": 1200, \"VIDEO\": 1200, \"STREAMING\": 1200}\n try:\n max_len = self.__measurement_modes[self.measurement_mode]\n except KeyError:\n raise KeyError(\"Invalid measurement mode given\")\n\n self.num_chunks = int(self.video_length / self.chunk_length)\n self.max_chunks = int(max_len / self.chunk_length)",
"def set_last_segment_length(self, length):\n prior_length = self.segments[-1].get_length()\n if prior_length != -1:\n self.end_time -= prior_length\n\n self.segments[-1].set_length(length)\n self.end_time += length",
"def svn_info_t_working_size_set(svn_info_t_self, apr_size_t_working_size): # real signature unknown; restored from __doc__\n pass"
]
| [
"0.5374626",
"0.5257471",
"0.5234235",
"0.5224763",
"0.51419026",
"0.5140727",
"0.5005431",
"0.49226668",
"0.4871184",
"0.48435473",
"0.47917712",
"0.46944144",
"0.46696663",
"0.46632966",
"0.46603596",
"0.4644813",
"0.4607283",
"0.45766616",
"0.4563704",
"0.4546243",
"0.45312554",
"0.45272598",
"0.4518849",
"0.4518849",
"0.4470986",
"0.4454742",
"0.44509542",
"0.4446664",
"0.44165412",
"0.4410739"
]
| 0.64125764 | 0 |
Set QC status based on qubit measurement. | def qc_qubit(args):
clarity_epp.qc.qubit.set_qc_flag(lims, args.process_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setMyStatus(self):\n self.clearMyStatus()\n for id, myComponent in self.components.iteritems():\n self.currentComps += 1\n if myComponent.type != '':\n compData = self.componentdata[myComponent.type]\n if myComponent.currentHP == myComponent.myComponentData.maxHP:\n # regular component set quad Attributes\n if compData.typeAP != '':\n self.typeAP = compData.typeAP\n elif compData.assault > 0:\n self.maxAssault += compData.assault\n self.maxAP += compData.maxAP\n self.maxSP += compData.maxSP\n self.genSP += compData.genSP\n self.maxPower += compData.power\n self.maxBattery += compData.battery\n self.thrust += compData.engine\n self.rotation += compData.rotate\n self.radar += compData.radar\n self.jamming += compData.jamming\n self.repair += compData.repair\n self.target += compData.target\n self.mass += compData.mass\n \n # tell weapons in quad to recalc their status\n for id, myWeapon in self.weapons.iteritems():\n myWeapon.setMyStatus()",
"def setQ(self,Q):\n self.Q = Q",
"def test_set_1(self):\n\n qubit.set(1, 0)\n\n result = qubit.measure(polarization)\n self.assertEqual(1, result)",
"def test_set_0(self):\n\n polarization = 1\n\n qubit.set(1, 1)\n\n result = qubit.measure(polarization)\n self.assertEqual(1, result)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return",
"def set_qxqz(self):\n self.qx = self._q_x()\n self.qz = self._q_z()",
"def updateQ_value(self, value):\n self.Q_value = (self.Q_value * self.nVisits + value) / (self.nVisits + 1)",
"def setMyStatus(self):\n self.clearMyStatus()\n self.mass = self.myShipHull.mass\n for position, myQuad in self.quads.iteritems():\n self.maxBattery += myQuad.maxBattery\n self.currentPower += myQuad.maxPower\n self.thrust += myQuad.thrust\n self.rotation += myQuad.rotation\n self.radar += myQuad.radar\n self.jamming += myQuad.jamming\n self.repair += myQuad.repair\n self.mass += myQuad.mass\n self.maxAssault += myQuad.maxAssault\n\n # scale back attributes if internal structure has been hit\n ratio = self.currentISP/self.myShipHull.maxISP\n self.currentPower = self.currentPower * ratio\n self.thrust = self.thrust * ratio\n self.rotation = self.rotation * ratio\n\n self.accel = self.myDesign.getAccel(self.thrust, self.mass)\n self.accel = self.accel\n\n self.rotation = self.myDesign.getRotation(self.rotation, self.mass)\n self.rotation = self.rotation\n self.setMyStrength()\n self.setWeaponStatus()\n self.setRange()\n self.setAssaultStrength(ratio)",
"def _update_status(self):\n if any([abs(v) > LIMITS[i] for i, v in enumerate(self.state)]):\n self.terminal = True\n elif abs(self.q[3]) < LIMITS[9]:\n self.terminal = True\n elif self.steps + 1 >= self.max_steps:\n self.terminal = True",
"def tweak_q(self, q):\n self._q = q\n self.reset()",
"def load_qc(self):\n rejectval = self.config.getint('profiles', 'qcreject') \n self.qc = self.read_var(self.qcvar)\n self.qc = self.qc.astype('unicode')\n self.qc[self.qc == '1'] = 1\n self.qc[np.logical_or(self.qc=='4', self.qc == '0')] = 0\n self.qc = self.qc.astype(bool)\n self.test_shape(self.qcvar, self.qc.shape, 2)",
"def _setVals(self, qubit_id=0):\n self.qubit_id = qubit_id",
"def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)",
"def setstatus(self,statusin):\n if self.status in self.statuserrors:\n print \"Action forbidden.\"\n self.inputstatus.setEntry(self.status)\n return \n if statusin == None: status=self.inputstatus.getEntry()\n else: status=statusin\n if status not in self.statuses:\n print \"Unknown status:\",status\n #self.inputstatus.setEntry(self.status)\n return\n option=status[0]\n cmd=\"setStatus(\"+'\"'+self.detector+'\"'+\",\"+self.inpnumDIM +\",\"+\"'\"+option+\"'\"+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n #self.getstatus()",
"def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)",
"def Q(self, value):\n assert value > 0, \"Q needs to be positive and above zero (we divide by Q)\"\n self._Q = value\n self._update()",
"def __init__(self, qubit, bit, circuit=None):\n super().__init__(\"measure\", [], [qubit], [bit], circuit)",
"def set_QUALITY(self,newQual):\n\t\tself.QUALITY = newQual",
"def test_set_scan_status(self):\n pass",
"def SetStatus(self, status):\r\n self.status = status",
"def run(self):\n self.run_measurement()\n self.run_analysis()\n self.results = self.analysis.proc_data_dict['analysis_params_dict']\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)",
"def Set_Meas_Statistics(self, state, ch=1):\n self.write(f':MEAS{ch}:STAT:ENAB {state}')",
"def _update_quality(self, quality:float):\n if quality is not None:\n self.quality_text.setText(str(quality)[0:5])\n if quality > 0.9:\n self.quality_text.setStyleSheet(\"QLabel { color : green }\")\n elif quality > 0.5:\n self.quality_text.setStyleSheet(\"QLabel { color : yellow }\")\n else:\n self.quality_text.setStyleSheet(\"QLabel { color : red }\")\n else:\n self.quality_text.setText(\"\")",
"def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")",
"def set_progress_value(self, value):\r\n\r\n pass",
"def set_status(self, status):\n # TODO log to db\n self.status = status",
"def quiet(self, q=True):\n self._quiet = bool(q)",
"def set_monitor(self, track, xclip, ident, args):\n if track in self.song().tracks and not track.is_foldable:\n if args in MON_STATES:\n track.current_monitoring_state = MON_STATES[args]\n else:\n if track.current_monitoring_state == 2:\n track.current_monitoring_state = 0\n else:\n track.current_monitoring_state += 1"
]
| [
"0.5476707",
"0.5449999",
"0.541398",
"0.5391995",
"0.5284747",
"0.5284747",
"0.5279951",
"0.5277346",
"0.527465",
"0.52394056",
"0.5238075",
"0.52378446",
"0.5227095",
"0.51985204",
"0.5135898",
"0.51245904",
"0.51070684",
"0.50971496",
"0.50670874",
"0.5066934",
"0.50367576",
"0.4908126",
"0.49080208",
"0.48955217",
"0.488438",
"0.48827907",
"0.4866585",
"0.48646498",
"0.48480126",
"0.48472226"
]
| 0.652974 | 0 |
Change artifact name to sequence name. | def placement_artifact_set_name(args):
if args.type == 'sequence_name':
clarity_epp.placement.artifact.set_sequence_name(lims, args.process_id)
elif args.type == 'run_id':
clarity_epp.placement.artifact.set_runid_name(lims, args.process_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_sequence_name___fix():\n # do not consider referenced shot nodes\n shots = pm.ls(type=\"shot\")\n shot = None\n for s in shots:\n if s.referenceFile() is None:\n shot = s\n break\n\n sequencers = shot.outputs(type=\"sequencer\")\n if not sequencers:\n raise PublishError(\"There are no sequencers in the scene!\")\n\n sequencer = sequencers[0]\n\n # get current task\n from anima.dcc import mayaEnv\n\n m = mayaEnv.Maya()\n v = m.get_current_version()\n task = v.task\n\n # get sequence and scene names\n sequence_name = get_seq_name_from_task(task)\n scene_name = get_scene_name_from_task(task)\n\n # set sequencer name as seq_name + sc_name\n name = \"%s_%s\" % (sequence_name, scene_name)\n sequencer.set_sequence_name(name)",
"def _project_name_to_package_name(project_name):\n return project_name.lower().replace('-', '')",
"def seq_name(seq):\n if len(seq) == 1:\n return cp_name(seq[0])\n return 'u' + '_'.join('%04X' % cp for cp in seq)",
"def reformat(self, seq_name, *, prefix=\"s\"):\n\t\treturn \"%s_%012u\" % (prefix, self.get_sid(seq_name))",
"def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)",
"def _name_increment_revision(name):\n revre = r\"^(.*?)([0-9]+)$\"\n m = re.search(revre, name)\n if m:\n name = m.group(1) + str(int(m.group(2)) + 1)\n else:\n name = name + \" (copy)\"\n return name",
"def unique_project_name(prefix: str = \"selenium-project\"):\n return f'{prefix}-{uuid.uuid4().hex[:8]}'",
"def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release",
"def check_sequence_name_format(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n progress_controller.maximum = 2\n\n # do not consider referenced shot nodes\n shots = pm.ls(type=\"shot\")\n shot = None\n for s in shots:\n if s.referenceFile() is None:\n shot = s\n break\n\n sequencer = shot.outputs(type=\"sequencer\")[0]\n\n # get current task\n from anima.dcc import mayaEnv\n\n m = mayaEnv.Maya()\n v = m.get_current_version()\n task = v.task\n\n # get sequence and scene names\n sequence_name = get_seq_name_from_task(task)\n scene_name = get_scene_name_from_task(task)\n\n progress_controller.increment()\n # set sequencer name as seq_name + sc_name\n name = \"%s_%s\" % (sequence_name, scene_name)\n\n if sequencer.get_sequence_name() != name:\n progress_controller.complete()\n raise PublishError(\n \"Sequence name format is not correct!!!<br>\"\n \"<br>\"\n \"It should have been:<br>\"\n \"<br>\"\n \"%s<br>\"\n \"<br>\"\n \"But found:<br>\"\n \"%s\" % (name, sequencer.get_sequence_name())\n )\n\n progress_controller.complete()",
"def rename_sequences(self, new_fasta, mapping):\n assert isinstance(new_fasta, FASTA)\n new_fasta.create()\n for seq in self:\n new_name = mapping[seq.id]\n nucleotides = str(seq.seq)\n new_fasta.add_str(nucleotides, new_name)\n new_fasta.close()",
"def web_archive_rename(id, name):\n\n if name is not None:\n yt_rename_playlist(id, name)\n time.sleep(5)",
"def get_package_name(self):\n return self.name + '-' + self.version",
"def rename(cls, phase, name):\r\n cls.phases.pop(phase.name)\r\n cls.renames[phase.name] = name\r\n phase.name = name\r\n cls.phases[name] = phase",
"def generate_workflow_name(self) -> str:\n pass",
"def name(self, new_name):\n self.rename(new_name)",
"def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)",
"def rAssetName(nodeNS):\n\t#return nodeNS.split('_')[0] + re.sub('.*?([0-9]*)$', r'\\1', nodeNS)\n\treturn nodeNS.split('_')[0]",
"def rename_slides(self):\n for idx, slide in enumerate(self):\n partname_str = '/ppt/slides/slide%d.xml' % (idx+1)\n slide.partname = PackURI(partname_str)",
"def get_seq_name_from_task(task):\n sequence = None\n while task is not None:\n if task.entity_type == \"Sequence\":\n sequence = task\n break\n else:\n task = task.parent\n\n if sequence:\n sequence_name = sequence.name\n else:\n sequence_name = \"SEQ\"\n\n return sequence_name",
"def simplify_job_name(name):\r\n name = name.split('/')[-1]\r\n if 'ver' in name:\r\n name = name.split('ver')[0] + '.ISO'\r\n return name",
"def get_project_assembly_name(deps_path):\n filename = os.path.basename(deps_path)\n return filename[:-len(DEPS_EXTENSION)]",
"def normalize_pypi_name(s: str) -> str:\n return NORMALIZE_PACKAGE_NAME_RE.sub(\"-\", s).lower()",
"def _generate_module_sheet_name(self, module_index):\n _module = self.app.get_module(module_index)\n sheet_name = \"_\".join([\"module\", _module.unique_id])\n self.slug_to_name[_module.unique_id] = _module.name\n return sheet_name",
"def rename(oldname, newname):",
"def replace_suffix (name, new_suffix):\n assert isinstance(name, basestring)\n assert isinstance(new_suffix, basestring)\n split = os.path.splitext (name)\n return split [0] + new_suffix",
"def get_release_name(self) -> str:\n if self.season is not None and self.episode is None:\n return os.path.basename(os.path.dirname(self.file))\n return os.path.splitext(os.path.basename(self.file))[0]",
"def generate_workflow_name(self) -> str:\n repository_name = self._parsed_url.basename_without_extension\n if self._git_ref:\n workflow_name = f\"{repository_name}-{self._git_ref}\"\n else:\n workflow_name = repository_name\n return self._clean_workflow_name(workflow_name)",
"def check_sequence_name(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n # do not consider referenced shot nodes\n shots = pm.ls(type=\"shot\")\n progress_controller.maximum = len(shots)\n shot = None\n for s in shots:\n if s.referenceFile() is None:\n shot = s\n break\n progress_controller.increment()\n\n progress_controller.complete()\n sequencer = shot.outputs(type=\"sequencer\")[0]\n sequence_name = sequencer.sequence_name.get()\n if sequence_name == \"\" or sequence_name is None:\n pm.select(sequencer)\n raise PublishError(\"Please enter a sequence name!!!\")",
"def adjust_event_name(event_name):\n pos=find_first_digit(event_name)\n return event_name[pos:]",
"def update_playbook_name(self, old_playbook, new_playbook):\n for key in [name for name in self.workflows.keys() if name.playbook == old_playbook]:\n self.update_workflow_name(old_playbook, key.workflow, new_playbook, key.workflow)"
]
| [
"0.6110718",
"0.59574616",
"0.5672568",
"0.5635998",
"0.55507517",
"0.55112606",
"0.54902583",
"0.5469872",
"0.5467294",
"0.5377182",
"0.532746",
"0.5306472",
"0.5283382",
"0.52668834",
"0.52513534",
"0.52153516",
"0.5213098",
"0.5209305",
"0.51679504",
"0.5163752",
"0.51610124",
"0.51358813",
"0.513507",
"0.51343703",
"0.5120478",
"0.5115094",
"0.5114491",
"0.5110074",
"0.51086175",
"0.5087229"
]
| 0.6644251 | 0 |
Route artifacts to a workflow. | def placement_route_artifact(args):
clarity_epp.placement.artifact.route_to_workflow(lims, args.process_id, args.workflow) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_workflow(args):\n if args.type == 'magnis':\n clarity_epp.export.workflow.helix_magnis(lims, args.process_id, args.output_file)\n elif args.type == 'mip':\n clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file)",
"def deploy():",
"def test_deploy_workflow_definition(self):\n pass",
"def run_workflow(EMBEDDING_BASE_PATH):\n train_tweets_path, val_tweets_path, test_tweets_path, image_dataset = run_pre_workflow()\n\n input_images, train_tweets, val_tweets, test_tweets, glove_embeddings = replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, image_dataset, EMBEDDING_BASE_PATH)\n\n preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion = transformation_catalog()\n \n sites_catalog()\n\n pegasus_properties()\n \n wf = Workflow('Crisis_Computing_Workflow')\n\n # --------------------------------------------------- TEXT PIPELINE ------------------------------------------------------ \n\n # Job 1: Preprocess tweets\n preprocessed_train_tweets = File('preprocessed_train_tweets.csv')\n preprocessed_val_tweets = File('preprocessed_val_tweets.csv')\n preprocessed_test_tweets = File('preprocessed_test_tweets.csv')\n \n job_preprocess_tweets = [Job(preprocess_tweets) for i in range(3)]\n job_preprocess_tweets[0].add_inputs(train_tweets)\n job_preprocess_tweets[0].add_outputs(preprocessed_train_tweets)\n job_preprocess_tweets[0].add_args('--filename', 'train_tweets.csv')\n \n job_preprocess_tweets[1].add_inputs(val_tweets)\n job_preprocess_tweets[1].add_outputs(preprocessed_val_tweets)\n job_preprocess_tweets[1].add_args('--filename', 'val_tweets.csv')\n \n job_preprocess_tweets[2].add_inputs(test_tweets)\n job_preprocess_tweets[2].add_outputs(preprocessed_test_tweets)\n job_preprocess_tweets[2].add_args('--filename', 'test_tweets.csv')\n\n\n # Job 2: HPO Bi-LSTM\n bilstm_best_params = File('best_bilstm_hpo_params.txt')\n\n job_hpo_train_bilstm = Job(hpo_train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets)\\\n .add_outputs(bilstm_best_params)\\\n .add_args('--trials', BILSTM_NUM_TRIALS)\n\n\n # Job 3: Train Bi-LSTM using best parameters from HPO study and output loss and accuracy curves\n trained_bilstm_model = File('bilstm_final_model.h5') \n bilstm_loss_curve = File('Loss_curve_bilstm.png')\n bilstm_accuracy_curve = File('Accuracy_curve_bilstm.png')\n\n\n job_train_bilstm = Job(train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, bilstm_best_params)\\\n .add_outputs(bilstm_loss_curve, bilstm_accuracy_curve, trained_bilstm_model)\\\n\n\n # Job 4: Run inference on best Bi-LSTM model to produce output on test dataset along with confusion matrix\n bilstm_train_output_prob = File('bilstm_train_output.csv')\n bilstm_test_output_prob = File('bilstm_test_output.csv')\n bilstm_confusion_matrix = File('bilstm_confusion_matrix.png')\n\n job_bilstm_inference = Job(bilstm_inference)\\\n .add_inputs(preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, trained_bilstm_model)\\\n .add_outputs(bilstm_train_output_prob, bilstm_test_output_prob, bilstm_confusion_matrix)\n\n\n # --------------------------------------------------- IMAGE PIPELINE ------------------------------------------------------ \n\n \n # Job 1: Preprocess images\n prefix = \"resized_\"\n job_preprocess_images = [Job(preprocess_images) for i in range(NUM_WORKERS)]\n resized_images = split_preprocess_jobs(job_preprocess_images, input_images, prefix)\n\n # Job 2: HPO ResNet-50\n resnet_best_params = File('best_resnet_hpo_params.txt')\n\n job_hpo_train_resnet = Job(hpo_train_resnet)\\\n .add_inputs(*resized_images)\\\n .add_args('--trials', 
RESNET_NUM_TRIALS)\\\n .add_outputs(resnet_best_params)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 3: Train ResNet-50 using best parameters from HPO study and output loss and accuracy curves\n trained_resnet_model = File('resnet_final_model.pth')\n resnet_loss_curve = File('Loss_curve_resnet.png')\n resnet_accuracy_curve = File('Accuracy_curve_resnet.png')\n\n job_train_resnet = Job(train_resnet)\\\n .add_inputs(*resized_images, resnet_best_params)\\\n .add_outputs(resnet_loss_curve, resnet_accuracy_curve, trained_resnet_model)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 4: Run inference on best ResNet-50 model to produce output on test dataset along with confusion matrix\n resnet_train_output_prob = File('resnet_train_output.csv')\n resnet_confusion_matrix = File('resnet_confusion_matrix.png')\n resnet_test_output_prob = File('resnet_test_output.csv') \n\n job_resnet_inference = Job(resnet_inference)\\\n .add_inputs(*resized_images, trained_resnet_model)\\\n .add_outputs(resnet_train_output_prob, resnet_test_output_prob, resnet_confusion_matrix)\n\n \n \n # --------------------------------------------------- LATE FUSION ------------------------------------------------------ \n\n # Job 1: Late Fusion\n confusion_matrix_MPC = File('late_fusion_MPC.png')\n confusion_matrix_LR = File('late_fusion_LR.png')\n confusion_matrix_MLP = File('late_fusion_MLP.png')\n report_MLP = File('late_fusion_MLP.csv')\n report_MPC = File('late_fusion_MPC.csv')\n report_LR = File('late_fusion_LR.csv')\n\n job_late_fusion = Job(late_fusion)\\\n .add_inputs(resnet_train_output_prob, resnet_test_output_prob, bilstm_train_output_prob, bilstm_test_output_prob)\\\n .add_outputs(confusion_matrix_MPC, confusion_matrix_LR, confusion_matrix_MLP, report_MLP, report_MPC, report_LR)\n\n wf.add_jobs(*job_preprocess_tweets, *job_preprocess_images, job_bilstm_inference, job_hpo_train_bilstm, job_train_bilstm, job_hpo_train_resnet, job_train_resnet, job_resnet_inference, job_late_fusion)\n\n try:\n wf.plan(submit=False, sites=[\"donut\"], output_sites=[\"donut\"], dir=\"submit\")\n #wf.wait()\n #wf.statistics()\n except PegasusClientError as e:\n print(e.output)\n \n #plot_workflow_graph(wf)\n \n return",
"def run_workflow(workflow_log_id):\n outputs = {}\n protocol = \"tcp\"\n\n workflow_log = WorkflowLog.objects.get(id=workflow_log_id)\n worker = workflow_log.performed_on\n\n WORKER_ENDPOINT = \"%s://%s:%s\" % (protocol, worker.ip, str(worker.port))\n WORKER_SECRET_KEY = worker.secret_key\n\n conn = BotConnection(WORKER_ENDPOINT, WORKER_SECRET_KEY)\n conn.connect()\n\n # Make a JSON\n request_header = {'workflow_log_id': workflow_log.id,\n 'workflow': slugify(workflow_log.workflow.title),\n 'workflow_log_time': workflow_log.date_created.strftime('%Y%m%d-%H%M%S'),\n 'script': {},\n 'hooks': {}, # see doc/HOOKS.md\n }\n\n # hooks for this workflow\n if workflow_log.workflow.pre_task:\n request_header['hooks']['pre_task'] = workflow_log.workflow.pre_task\n\n if workflow_log.workflow.post_task:\n request_header['hooks']['post_task'] = workflow_log.workflow.post_task\n\n ordered_workflows = order_workflow_tasks(workflow_log.workflow)\n\n workflow_log.date_started = timezone.now()\n for idx, workflow_task in enumerate(ordered_workflows):\n template = render_template(workflow_log, workflow_task)\n\n if workflow_task.task.is_builtin:\n m = importCode(template, \"test\")\n output = {}\n output['stdout'] = str(m.run())\n output['exit_code'] = workflow_log.SUCCESS\n else:\n request = request_header\n request['script']['id'] = idx\n request['script']['body'] = template\n\n output = send_script(request, conn)\n\n outputs['%i_%s' % (workflow_task.id, workflow_task.task.title)] = output\n\n # loop over all next wf_tasks and add this scripts output to inputs\n current = workflow_task\n while current.next_workflow_task:\n current = current.next_workflow_task\n\n # deepcopy dict to prevent runtime error\n inp = deepcopy(workflow_log.inputs)\n # loop key, value pairs and look if this output needs to be set as input\n for key, value in inp[str(current.id)]['string'].iteritems():\n if value == 'output_%s' % str(workflow_task.id):\n workflow_log.inputs[str(current.id)]['string'][key] = output['stdout']\n\n if 'exit_code' not in output or output['exit_code'] is not workflow_log.SUCCESS:\n workflow_log.exit_code = workflow_log.ERROR\n workflow_log.save()\n break\n else:\n workflow_log.exit_code = workflow_log.SUCCESS\n\n conn.close()\n\n workflow_log.date_finished = timezone.now()\n workflow_log.outputs = outputs\n workflow_log.save()\n\n # Notify user in case of failure\n if workflow_log.exit_code == workflow_log.ERROR:\n send_failiure_notification(workflow_log)",
"def start_workflow(self, **params):\n raise NotImplementedError",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-w\", \"--workflow_path\", help='Path to workflow file')\n parser.add_argument(\"-g\", \"--galaxy\",\n dest=\"galaxy_url\",\n help=\"Target Galaxy instance URL/IP address (required \"\n \"if not defined in the tools list file)\",)\n parser.add_argument(\"-a\", \"--apikey\",\n dest=\"api_key\",\n help=\"Galaxy admin user API key (required if not \"\n \"defined in the tools list file)\",)\n args = parser.parse_args()\n\n gi = galaxy.GalaxyInstance(url=args.galaxy_url, key=args.api_key)\n\n with open(args.workflow_path, 'r') as wf_file:\n import_uuid = json.load(wf_file).get('uuid')\n existing_uuids = [d.get('latest_workflow_uuid') for d in gi.workflows.get_workflows()]\n if import_uuid not in existing_uuids:\n gi.workflows.import_workflow_from_local_path(args.workflow_path)",
"def init_workflow():\n pass",
"def test_libraryrun_workflow_link(self):\n mock_sqr: SequenceRun = SequenceRunFactory()\n\n mock_libraryrun: LibraryRun = LibraryRunFactory()\n\n # Change library_id to match metadata\n mock_libraryrun.library_id = \"L2000001\"\n mock_libraryrun.save()\n result: dict = bcl_convert.handler({\n 'gds_volume_name': mock_sqr.gds_volume_name,\n 'gds_folder_path': mock_sqr.gds_folder_path,\n 'seq_run_id': mock_sqr.run_id,\n 'seq_name': mock_sqr.name,\n }, None)\n\n logger.info(\"-\" * 32)\n logger.info(\"Example bcl_convert.handler lambda output:\")\n logger.info(json.dumps(result))\n\n # assert bcl convert workflow launch success and save workflow run in db\n workflow = Workflow.objects.get(id=result['id'])\n\n # Grab library run for particular workflow\n library_run_in_workflows = workflow.libraryrun_set.all()\n\n self.assertEqual(1, library_run_in_workflows.count())",
"def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)",
"def run_gfs_workflow(threddspath):\n wrksppath = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tethysapp', 'ffgs', 'workspaces', 'app_workspace')\n\n # enable logging to track the progress of the workflow and for debugging\n logpath = os.path.join(threddspath, 'workflow.log')\n logging.basicConfig(filename=logpath, filemode='w', level=logging.INFO, format='%(message)s')\n logging.info('Workflow initiated on ' + datetime.datetime.utcnow().strftime(\"%D at %R\"))\n\n # start the workflow by setting the environment\n timestamp, redundant = setenvironment(threddspath, wrksppath)\n model = 'gfs'\n\n # if this has already been done for the most recent forecast, abort the workflow\n if redundant:\n logging.info('\\nWorkflow aborted on ' + datetime.datetime.utcnow().strftime(\"%D at %R\"))\n return 'Workflow Aborted- already run for most recent data'\n\n # run the workflow for each region, for each model in that region\n for region in FFGS_REGIONS:\n logging.info('\\nBeginning to process ' + region[1] + ' on ' + datetime.datetime.utcnow().strftime(\"%D at %R\"))\n # download each forecast model, convert them to netcdfs and tiffs\n succeeded = download_gfs(threddspath, timestamp, region[1], model)\n if not succeeded:\n return 'Workflow Aborted- Downloading Errors Occurred'\n gfs_tiffs(threddspath, wrksppath, timestamp, region[1], model)\n resample(wrksppath, region[1], model)\n # the geoprocessing functions\n zonal_statistics(wrksppath, timestamp, region[1], model)\n nc_georeference(threddspath, timestamp, region[1], model)\n # generate color scales and ncml aggregation files\n new_ncml(threddspath, timestamp, region[1], model)\n new_colorscales(wrksppath, region[1], model)\n # cleanup the workspace by removing old files\n cleanup(threddspath, timestamp, region[1], model)\n\n logging.info('\\nAll regions finished- writing the timestamp used on this run to a txt file')\n with open(os.path.join(threddspath, 'gfs_timestamp.txt'), 'w') as file:\n file.write(timestamp)\n\n logging.info('\\n\\nGFS Workflow completed successfully on ' + datetime.datetime.utcnow().strftime(\"%D at %R\"))\n logging.info('If you have configured other models, they will begin processing now.\\n\\n\\n')\n\n return 'GFS Workflow Completed- Normal Finish'",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def transformation_catalog():\n tc = TransformationCatalog()\n\n # Add docker container\n #crisis_container = Container(\n # 'crisis_container',\n # Container.DOCKER,\n # image = \"docker://slnagark/crisis_wf:latest\",\n # arguments=\"--runtime=nvidia --shm-size=1gb\"\n # ).add_env(TORCH_HOME=\"/tmp\")\n \n crisis_container = Container(\n 'galaxy_container',\n Container.SINGULARITY,\n image = str(Path(\".\").parent.resolve() / \"containers/crisis-computing_latest.sif\"),\n image_site = \"local\",\n mounts=[\"${DONUT_USER_HOME}:${DONUT_USER_HOME}\"]\n ).add_env(TORCH_HOME=\"/tmp\")\n\n\n # preprocessing scripts\n preprocess_images = Transformation(\n \"preprocess_images\",\n site = \"local\",\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_images.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n preprocess_tweets = Transformation(\n \"preprocess_tweets\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_tweets.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n \n # HPO, training and inference scripts for ResNet-50\n hpo_train_resnet = Transformation(\n \"hpo_train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_resnet = Transformation(\n \"train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n resnet_inference = Transformation(\n \"resnet_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/resnet_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # HPO, training and inference scripts for Bi-LSTM\n\n hpo_train_bilstm = Transformation(\n \"hpo_train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n #.add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_bilstm = Transformation(\n \"train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n \n bilstm_inference = Transformation(\n \"bilstm_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/bilstm_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 
10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # late fusion script\n late_fusion = Transformation(\n \"late_fusion\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/late_fusion.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n\n tc.add_containers(crisis_container)\n tc.add_transformations(preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion)\n tc.write()\n\n return preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion",
"def execute(self) -> None:\n env_vars['AWS_DEFAULT_REGION'] = self.task.parsed_payload['sagemaker_region']\n\n steps: tuple = (\n self._assume_service_account_role,\n self._retrieve_model_binary_stream_from_db, # Retrieve Model BLOB\n self._deserialize_artifact_stream, # Deserialize it to the Disk\n self._deploy_model_to_sagemaker, # Deploy model to SageMaker\n )\n\n for execute_step in steps:\n execute_step()",
"async def main():\n \n # workflow status\n global status\n\n # Mode says which objects must be archived: DB dump, source files or both.\n try:\n mode=sys.argv[1]\n except IndexError:\n mode = 'all'\n\n # queue of files to be archived\n files_to_upload = deque()\n \n logger.trace(\"Archiving ...\")\n # Tasks to archive files and database dump\n list_of_threads = get_list_of_threads(mode=mode)\n\n tar_names = await asyncio.gather(*list_of_threads)\n\n # Clear names list, removing None elements if exist\n tar_names = [name for name in tar_names if name]\n\n files_to_upload.extend(tar_names)\n logger.trace(\"Ok.\")\n\n logger.trace(\"Uploading ...\")\n\n # Connect to the ftp-server and upload the archived files.\n await upload_to_ftp_server(host=FTP.SERVER.value,\n port=FTP.PORT.value,\n login=FTP.LOGIN.value,\n password=FTP.PASSWORD.value,\n files=files_to_upload)\n\n # Remove archived and dump files on the server site.\n clear_garbage(mode=mode, files=tar_names)\n\n # Check the workflow status. If it's not empty, send an error email.\n if len(status) > 0 and ERROR_NOTIFICATION_BY_EMAIL:\n backup_email()",
"def main():\n with open('config.json') as config_file:\n configs = json.load(config_file)\n\n jar_list = utilities.upload_jars(configs)\n utilities.sign_jars(configs)\n\n artifact_folder = utilities.prepare_artifacts(configs, jar_list)\n\n repo_id = utilities.create_staging_repo(configs)\n utilities.deploy_to_staging_repo(configs, artifact_folder, repo_id)\n utilities.close_staging_repo(configs, repo_id)",
"def task_deploy():\n client = boto3.client(\"lambda\")\n\n def upload_build():\n if function_exists(client):\n update_lambda_function(client)\n else:\n create_lambda_function(client)\n\n return {\"actions\": [upload_build], \"file_dep\": [f\"{DIST_DIR}/build.zip\"]}",
"def test_compute_dependencies(self):\n workflow = self.get_workflow(\n \"\"\"file://file2 <- file://file1\n Original code\n\nfile://file3 <- file://file1\n\n\"\"\")\n workflow.compute_dependencies()\n assert workflow.resources['file://file1'].dependant_processes == [workflow._processes[0],\n workflow._processes[1]]",
"def invoke_do_workflow(workflow_name, workflow_object, logger):\n try:\n success = workflow_object.do_workflow()\n except Exception:\n success = None\n logger.error(\"error processing workflow %s\", workflow_name, exc_info=True)\n\n # Print the result to the log\n if success:\n logger.info(\"%s success %s\" % (workflow_name, success))",
"def run_workflow(args, run=True):\n\n import os\n import os.path as op\n\n import nipype.interfaces.io as nio\n import nipype.pipeline.engine as pe\n import nipype.interfaces.utility as niu\n\n import qap\n from qap_utils import read_json\n\n import glob\n\n import time\n from time import strftime\n from nipype import config as nyconfig\n\n # unpack args\n resource_pool_dict, sub_info_list, config, run_name, runargs, \\\n bundle_idx, num_bundles = args\n\n # Read and apply general settings in config\n keep_outputs = config.get('write_all_outputs', False)\n\n # take date+time stamp for run identification purposes\n pipeline_start_stamp = strftime(\"%Y-%m-%d_%H:%M:%S\")\n pipeline_start_time = time.time()\n\n if \"workflow_log_dir\" not in config.keys():\n config[\"workflow_log_dir\"] = config[\"output_directory\"]\n\n bundle_log_dir = op.join(config[\"workflow_log_dir\"],\n '_'.join([\"bundle\", str(bundle_idx)]))\n\n try:\n os.makedirs(bundle_log_dir)\n except:\n if not op.isdir(bundle_log_dir):\n err = \"[!] Bundle log directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % bundle_log_dir\n raise Exception(err)\n else:\n pass\n\n # set up logging\n nyconfig.update_config(\n {'logging': {'log_directory': bundle_log_dir, 'log_to_file': True}})\n logging.update_logging(nyconfig)\n\n logger.info(\"QAP version %s\" % qap.__version__)\n logger.info(\"Pipeline start time: %s\" % pipeline_start_stamp)\n\n workflow = pe.Workflow(name=run_name)\n workflow.base_dir = op.join(config[\"working_directory\"])\n\n # set up crash directory\n workflow.config['execution'] = \\\n {'crashdump_dir': config[\"output_directory\"]}\n\n # create the one node all participants will start from\n starter_node = pe.Node(niu.Function(input_names=['starter'], \n output_names=['starter'], \n function=starter_node_func),\n name='starter_node')\n\n # set a dummy variable\n starter_node.inputs.starter = \"\"\n\n new_outputs = 0\n\n # iterate over each subject in the bundle\n logger.info(\"Starting bundle %s out of %s..\" % (str(bundle_idx),\n str(num_bundles)))\n # results dict\n rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}\n\n for sub_info in sub_info_list:\n\n resource_pool = resource_pool_dict[sub_info]\n\n # in case we're dealing with string entries in the data dict\n try:\n resource_pool.keys()\n except AttributeError:\n continue\n\n # resource pool check\n invalid_paths = []\n\n for resource in resource_pool.keys():\n try:\n if not op.isfile(resource_pool[resource]) and resource != \"site_name\":\n invalid_paths.append((resource, resource_pool[resource]))\n except:\n err = \"\\n\\n[!]\"\n raise Exception(err)\n\n if len(invalid_paths) > 0:\n err = \"\\n\\n[!] 
The paths provided in the subject list to the \" \\\n \"following resources are not valid:\\n\"\n\n for path_tuple in invalid_paths:\n err = \"%s%s: %s\\n\" % (err, path_tuple[0], path_tuple[1])\n\n err = \"%s\\n\\n\" % err\n raise Exception(err)\n\n # process subject info\n sub_id = str(sub_info[0])\n # for nipype\n if \"-\" in sub_id:\n sub_id = sub_id.replace(\"-\",\"_\")\n if \".\" in sub_id:\n sub_id = sub_id.replace(\".\",\"_\")\n\n if sub_info[1]:\n session_id = str(sub_info[1])\n # for nipype\n if \"-\" in session_id:\n session_id = session_id.replace(\"-\",\"_\")\n if \".\" in session_id:\n session_id = session_id.replace(\".\",\"_\")\n else:\n session_id = \"session_0\"\n\n if sub_info[2]:\n scan_id = str(sub_info[2])\n # for nipype\n if \"-\" in scan_id:\n scan_id = scan_id.replace(\"-\",\"_\")\n if \".\" in scan_id:\n scan_id = scan_id.replace(\".\",\"_\")\n else:\n scan_id = \"scan_0\"\n\n name = \"_\".join([\"\", sub_id, session_id, scan_id])\n\n rt[name] = {'id': sub_id, 'session': session_id, 'scan': scan_id,\n 'resource_pool': str(resource_pool)}\n\n logger.info(\"Participant info: %s\" % name)\n\n # set output directory\n output_dir = op.join(config[\"output_directory\"], run_name,\n sub_id, session_id, scan_id)\n\n try:\n os.makedirs(output_dir)\n except:\n if not op.isdir(output_dir):\n err = \"[!] Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % output_dir\n raise Exception(err)\n else:\n pass\n\n # for QAP spreadsheet generation only\n config.update({\"subject_id\": sub_id, \"session_id\": session_id,\n \"scan_id\": scan_id, \"run_name\": run_name})\n\n if \"site_name\" in resource_pool:\n config.update({\"site_name\": resource_pool[\"site_name\"]})\n\n logger.info(\"Configuration settings:\\n%s\" % str(config))\n\n qap_types = [\"anatomical_spatial\", \n \"functional_spatial\", \n \"functional_temporal\"]\n\n # update that resource pool with what's already in the output\n # directory\n for resource in os.listdir(output_dir):\n if (op.exists(op.join(output_dir, resource)) and\n resource not in resource_pool.keys()):\n try:\n resource_pool[resource] = \\\n glob.glob(op.join(output_dir, resource, \"*\"))[0]\n except IndexError:\n if \".json\" in resource:\n # load relevant json info into resource pool\n json_file = op.join(output_dir, resource)\n json_dict = read_json(json_file)\n sub_json_dict = json_dict[\"%s %s %s\" % (sub_id,\n session_id,\n scan_id)]\n\n if \"anatomical_header_info\" in sub_json_dict.keys():\n resource_pool[\"anatomical_header_info\"] = \\\n sub_json_dict[\"anatomical_header_info\"]\n\n if \"functional_header_info\" in sub_json_dict.keys():\n resource_pool[\"functional_header_info\"] = \\\n sub_json_dict[\"functional_header_info\"]\n\n for qap_type in qap_types:\n if qap_type in sub_json_dict.keys():\n resource_pool[\"_\".join([\"qap\",qap_type])] = \\\n sub_json_dict[qap_type]\n except:\n # a stray file in the sub-sess-scan output directory\n pass\n\n # create starter node which links all of the parallel workflows within\n # the bundle together as a Nipype pipeline\n resource_pool[\"starter\"] = (starter_node, 'starter')\n\n # individual workflow and logger setup\n logger.info(\"Contents of resource pool for this participant:\\n%s\"\n % str(resource_pool))\n\n # start connecting the pipeline\n qw = None\n for qap_type in qap_types:\n if \"_\".join([\"qap\", qap_type]) not in resource_pool.keys():\n if qw is None:\n from qap import qap_workflows as qw\n wf_builder = \\\n getattr(qw, \"_\".join([\"qap\", qap_type, 
\"workflow\"]))\n workflow, resource_pool = wf_builder(workflow, resource_pool,\n config, name)\n\n if (\"anatomical_scan\" in resource_pool.keys()) and \\\n (\"anatomical_header_info\" not in resource_pool.keys()):\n if qw is None:\n from qap import qap_workflows as qw\n workflow, resource_pool = \\\n qw.qap_gather_header_info(workflow, resource_pool, config,\n name, \"anatomical\")\n\n if (\"functional_scan\" in resource_pool.keys()) and \\\n (\"functional_header_info\" not in resource_pool.keys()):\n if qw is None:\n from qap import qap_workflows as qw\n workflow, resource_pool = \\\n qw.qap_gather_header_info(workflow, resource_pool, config,\n name, \"functional\")\n\n # set up the datasinks\n out_list = []\n for output in resource_pool.keys():\n for qap_type in qap_types:\n if qap_type in output:\n out_list.append(\"_\".join([\"qap\", qap_type]))\n\n # write_all_outputs (writes everything to the output directory, not\n # just the final JSON files)\n if keep_outputs:\n out_list = resource_pool.keys()\n logger.info(\"Outputs we're keeping: %s\" % str(out_list))\n logger.info('Resource pool keys after workflow connection: '\n '{}'.format(str(resource_pool.keys())))\n\n # Save reports to out_dir if necessary\n if config.get('write_report', False):\n\n if (\"qap_mosaic\" in resource_pool.keys()) and \\\n (\"qap_mosaic\" not in out_list):\n out_list += ['qap_mosaic']\n\n # The functional temporal also has an FD plot\n if 'qap_functional_temporal' in resource_pool.keys():\n if (\"qap_fd\" in resource_pool.keys()) and \\\n (\"qap_fd\" not in out_list):\n out_list += ['qap_fd']\n\n for output in out_list:\n # we use a check for len()==2 here to select those items in the\n # resource pool which are tuples of (node, node_output), instead\n # of the items which are straight paths to files\n\n # resource pool items which are in the tuple format are the\n # outputs that have been created in this workflow because they\n # were not present in the subject list YML (the starting resource\n # pool) and had to be generated\n if (len(resource_pool[output]) == 2) and (output != \"starter\"):\n ds = pe.Node(nio.DataSink(), name='datasink_%s%s'\n % (output,name))\n ds.inputs.base_directory = output_dir\n node, out_file = resource_pool[output]\n workflow.connect(node, out_file, ds, output)\n new_outputs += 1\n elif \".json\" in resource_pool[output]:\n new_outputs += 1\n\n logger.info(\"New outputs: %s\" % str(new_outputs))\n\n # run the pipeline (if there is anything to do)\n if new_outputs > 0:\n if config.get('write_graph', False):\n workflow.write_graph(\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n workflow.write_graph(\n graph2use=\"orig\",\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n workflow.write_graph(\n graph2use=\"hierarchical\",\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n if run:\n try:\n logger.info(\"Running with plugin %s\" % runargs[\"plugin\"])\n logger.info(\"Using plugin args %s\" % runargs[\"plugin_args\"])\n workflow.run(plugin=runargs[\"plugin\"],\n plugin_args=runargs[\"plugin_args\"])\n rt['status'] = 'finished'\n logger.info(\"Workflow run finished for bundle %s.\"\n % str(bundle_idx))\n except Exception as e: # TODO We should be more specific here ...\n errmsg = e\n rt.update({'status': 'failed'})\n logger.info(\"Workflow run failed for bundle %s.\"\n % str(bundle_idx))\n # ... 
however this is run inside a pool.map: do not raise\n # Exception\n else:\n return workflow\n\n else:\n rt['status'] = 'cached'\n logger.info(\"\\nEverything is already done for bundle %s.\"\n % str(bundle_idx))\n\n # Remove working directory when done\n if not keep_outputs:\n try:\n work_dir = op.join(workflow.base_dir, scan_id)\n\n if op.exists(work_dir):\n import shutil\n shutil.rmtree(work_dir)\n except:\n logger.warn(\"Couldn\\'t remove the working directory!\")\n pass\n\n if rt[\"status\"] == \"failed\":\n logger.error(errmsg)\n else:\n pipeline_end_stamp = strftime(\"%Y-%m-%d_%H:%M:%S\")\n pipeline_end_time = time.time()\n logger.info(\"Elapsed time (minutes) since last start: %s\"\n % ((pipeline_end_time - pipeline_start_time) / 60))\n logger.info(\"Pipeline end time: %s\" % pipeline_end_stamp)\n\n return rt",
"def __init__(self, workflow):\n self.workflow = workflow",
"def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()",
"def extract_artifacts (self, layout):\n print('Extracting artifacts according to layout:')\n for path, afs in layout.items():\n artifact = afs[0][0]\n member = afs[0][1]\n print(' %s (from %s) -> %s' % (member, artifact, path))\n outf = os.path.join(self.stpath, path)\n zfile.ZFile.extract(artifact.lpath, member, outf)\n\n self.add_file(outf)\n\n # Rename files, if needed.\n for root, _, filenames in os.walk(self.stpath):\n for filename in filenames:\n fname = os.path.basename(filename)\n if fname in rename_files:\n bpath = os.path.join(root, os.path.dirname(filename))\n oldfile = os.path.join(bpath, fname)\n newfile = os.path.join(bpath, rename_files[fname])\n print('Renaming %s -> %s' % (oldfile, newfile))\n os.rename(oldfile, newfile)\n\n # And rename them in the files map too\n rename_these = [x for x in self.files.keys() if os.path.basename(x) in rename_files]\n for oldfile in rename_these:\n newfile = os.path.join(os.path.dirname(oldfile),\n rename_files[os.path.basename(oldfile)])\n self.files[newfile] = self.files[oldfile]\n del self.files[oldfile]",
"def deploy(self, topology):\n print \"ABC - Deployer.deploy()\"",
"def test_workflows_get(self):\n pass",
"def test_workflows_get(self):\n pass",
"def step_async(self, actions):",
"def workflow(site):\n click.echo(\"Preparing input for site {}\".format(site))\n key_store = KeyStore(get_config_file())\n if key_store.does_site_exist(site):\n flow.generate_mesh()\n flow.generate_control_files()\n flow.compress_input()\n flow.encrypt_input()\n flow.transfer_files()\n else:\n click.echo(\"Site {} does not exist in keystore, please add_site\".format(site))",
"def run_download_and_se_pe_illumina_covid_variation_workflow(accessions_file, nc_45512_2_fasta_file, api_key, galaxy_url, history_id, new_history_name, publish, monitor_invocation, workflow_id_override, quiet):\n if quiet:\n log.setLevel(logging.ERROR)\n gi = bioblend.galaxy.GalaxyInstance(galaxy_url, api_key)\n workflow_id = workflow_id_override or '69535d67-8a64-4ba5-a12f-886873e1e236'\n try:\n gi.workflows.show_workflow(workflow_id)\n except Exception:\n if FULL_WORKFLOW:\n gi.workflows.import_workflow_dict(FULL_WORKFLOW)\n log.info(\"Imported workflow\")\n else:\n raise Exception(\"Workflow with id '%s' not uploaded or not accessible\" % workflow_id)\n if history_id is None:\n if new_history_name is None:\n new_history_name = 'History for Download and SE+PE Illumina Covid Variation Workflow execution'\n history = gi.histories.create_history(name=new_history_name)\n else:\n history = gi.histories.show_history(history_id)\n history_id = history['id']\n if publish:\n history = gi.histories.update_history(history_id, published=True, importable=True)\n history_url = urljoin(galaxy_url, history['username_and_slug'])\n else:\n history_url = urljoin(galaxy_url, \"histories/view?id=%s\" % history_id)\n # This will look a bit awkward,\n # those could become custom click types\n datasets_to_upload = [accessions_file, nc_45512_2_fasta_file]\n upload_paths = {}\n for dataset in datasets_to_upload:\n log.info(\"Uploading dataset '%s' to history %s\", dataset, history_url)\n r = gi.tools.upload_file(path=dataset, history_id=history_id, to_posix_lines=False)\n upload_paths[dataset] = {'src': 'hda', 'id': r['outputs'][0]['id']}\n inputs = {\n 'Accessions file': upload_paths.get(accessions_file, accessions_file),\n 'NC_45512.2 fasta file': upload_paths.get(nc_45512_2_fasta_file, nc_45512_2_fasta_file),\n }\n invocation = run_workflow(gi, workflow_id, history_id, inputs)\n assert invocation['state'] == 'new', \"Expected invocation state to be 'new', but invocation is %s\" % invocation\n log.info(\"Workflow '%s' started with invocation id '%s'. Results will appear in history %s\", workflow_id, invocation['id'], history_url)",
"def mock_workflow():\n\n workflow = DockerBuildWorkflow(\"mock:default_built\", source=SOURCE)\n workflow.source = StubSource()\n builder = StubInsideBuilder().for_workflow(workflow)\n builder.set_df_path('/mock-path')\n base_image_name = ImageName.parse(\"mock:tag\")\n builder.parent_images[ImageName.parse(\"mock:base\")] = base_image_name\n builder.base_image = base_image_name\n builder.tasker = flexmock()\n workflow.builder = flexmock(builder)\n\n return workflow"
]
| [
"0.56289124",
"0.5347213",
"0.53341836",
"0.5324889",
"0.52798414",
"0.52773035",
"0.52421165",
"0.5202736",
"0.5156403",
"0.5149803",
"0.5113915",
"0.5069018",
"0.5048605",
"0.5044188",
"0.50245404",
"0.49843836",
"0.4972901",
"0.49722424",
"0.49616715",
"0.4956883",
"0.4941695",
"0.49379745",
"0.49328625",
"0.49107486",
"0.49057603",
"0.49057603",
"0.49031082",
"0.48773256",
"0.48763242",
"0.48741174"
]
| 0.73709744 | 0 |
Complete protocol step (Dx Mark protocol complete). | def placement_complete_step(args):
clarity_epp.placement.step.finish_protocol_complete(lims, args.process_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def declareDone(self, cmd):\n pass",
"def complete(self):\n pass",
"def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False",
"def processCompleteMessage(self, msg):\r\n self._connection.receivedMessage(msg)",
"def completion() -> None:",
"def Done(self):\n pass",
"def grabComplete(self): #$NON-NLS-1$\r",
"def complete(self):\n self._is_complete = True",
"def finished(self, reply):\n pass",
"def worker_runtest_protocol_complete(self, node, item_index, duration):\n self.sched.mark_test_complete(node, item_index, duration)",
"def on_pes_packet_complete(self):\n pass",
"def complete_run():\n pass",
"def step(self):\r\n cmd = struct.pack('>B', 54)\r\n self.send(cmd)",
"def onDone(self):\n pass",
"def end_phase():\n pass",
"def state_end_sub(self, byte):\n self.telnet_cmd.append(byte)\n if byte == 240:\n self.handle_telnet_cmd(self.telnet_cmd)\n self.next_fn = self.state_text\n else:\n self.next_fn = self.state_sub",
"def complete_cmd(self):\r\n if self.select_cmd is not None:\r\n self.do_cmd()",
"def succeeded(self):\n self.did_end = True",
"def command_done(self):\n return self.read(\"*OPC?\") == \"1\"",
"def done(self):",
"def done(self):",
"def job_step_complete(self, job_request_payload):\n if job_request_payload.success_command == JobCommands.STORE_JOB_OUTPUT_COMPLETE:\n raise ValueError(\"Programmer error use use job_step_store_output_complete instead.\")\n payload = JobStepCompletePayload(job_request_payload)\n self.send(job_request_payload.success_command, payload)",
"def proceed(self):\n pass",
"def isComplete(self): #$NON-NLS-1$\r",
"def end(c: Composition) -> None:\n c.run(\"testdrive\", \"verify-data.td\")",
"async def dsay(self):\r\n await self.esay(\"Done.\")",
"def finish():\n pass",
"def completeStep(self):\n self.chunk_percentage[self.current_step - 1] = self.current_chunk_size\n self.progress_updated.emit(self.percentage)\n if self.total_steps == self.current_step:\n self.initialized = False",
"def __complete(self):\n # First propagate halt message to all neighbours\n print_level('debug', self.node_id,\n 'Sending halt to all branch neighbors')\n for _in in range(self.num_neighbors):\n edge = self.edges[_in]\n if edge.get_status() == EdgeStatus.branch:\n self.__edge_stub(_in, Message.halt)\n self.msg_q.close() \n self.completed = True",
"def finish(self):\n pass"
]
| [
"0.660435",
"0.6354931",
"0.621701",
"0.60923034",
"0.60668945",
"0.6024917",
"0.5988484",
"0.5959301",
"0.5930279",
"0.5911042",
"0.59010434",
"0.5883401",
"0.5859091",
"0.58490753",
"0.58020234",
"0.58004105",
"0.57969505",
"0.57882637",
"0.5739414",
"0.57338345",
"0.57338345",
"0.5727583",
"0.5724425",
"0.5686149",
"0.5685405",
"0.5681912",
"0.56585413",
"0.56564593",
"0.56562173",
"0.5654826"
]
| 0.7127411 | 0 |
Placement tecan process, distribute artifacts over two containers | def placement_tecan(args):
clarity_epp.placement.tecan.place_artifacts(lims, args.process_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transformation_catalog():\n tc = TransformationCatalog()\n\n # Add docker container\n #crisis_container = Container(\n # 'crisis_container',\n # Container.DOCKER,\n # image = \"docker://slnagark/crisis_wf:latest\",\n # arguments=\"--runtime=nvidia --shm-size=1gb\"\n # ).add_env(TORCH_HOME=\"/tmp\")\n \n crisis_container = Container(\n 'galaxy_container',\n Container.SINGULARITY,\n image = str(Path(\".\").parent.resolve() / \"containers/crisis-computing_latest.sif\"),\n image_site = \"local\",\n mounts=[\"${DONUT_USER_HOME}:${DONUT_USER_HOME}\"]\n ).add_env(TORCH_HOME=\"/tmp\")\n\n\n # preprocessing scripts\n preprocess_images = Transformation(\n \"preprocess_images\",\n site = \"local\",\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_images.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n preprocess_tweets = Transformation(\n \"preprocess_tweets\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_tweets.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n \n # HPO, training and inference scripts for ResNet-50\n hpo_train_resnet = Transformation(\n \"hpo_train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_resnet = Transformation(\n \"train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n resnet_inference = Transformation(\n \"resnet_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/resnet_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # HPO, training and inference scripts for Bi-LSTM\n\n hpo_train_bilstm = Transformation(\n \"hpo_train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n #.add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_bilstm = Transformation(\n \"train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n \n bilstm_inference = Transformation(\n \"bilstm_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/bilstm_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 
10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # late fusion script\n late_fusion = Transformation(\n \"late_fusion\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/late_fusion.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n\n tc.add_containers(crisis_container)\n tc.add_transformations(preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion)\n tc.write()\n\n return preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion",
"def deploy():",
"def docker_worker():",
"def dist():\n PackCommandExecutor().pack()\n DistCommandExecutor().dist()",
"def main():\n ############################################################################\n # Docker setup #\n ############################################################################\n ip = {\"ROBOT_IP\": ROBOT_IPS[fab_conf[\"target\"].as_str()]}\n compose_up(DOCKER_COMPOSE_PATHS[\"driver\"], check_output=True, env_vars=ip)\n log.debug(\"Driver services are running.\")\n\n ############################################################################\n # Load fabrication data #\n ############################################################################\n fab_json_path = fab_conf[\"paths\"][\"fab_data_path\"].as_path()\n clay_bullets = load_bullets(fab_json_path)\n\n log.info(\"Fabrication data read from: {}\".format(fab_json_path))\n log.info(\"{} items in clay_bullets.\".format(len(clay_bullets)))\n\n pick_station_json = fab_conf[\"paths\"][\"pick_conf_path\"].as_path()\n with pick_station_json.open(mode=\"r\") as fp:\n pick_station_data = json.load(fp)\n pick_station = PickStation.from_data(pick_station_data)\n\n ############################################################################\n # Create Ros Client #\n ############################################################################\n ros = RosClient()\n\n ############################################################################\n # Create ABB Client #\n ############################################################################\n abb = AbbClient(ros)\n abb.run()\n log.debug(\"Connected to ROS\")\n\n check_reconnect(\n abb,\n driver_container_name=DRIVER_CONTAINER_NAME,\n timeout_ping=fab_conf[\"docker\"][\"timeout_ping\"].get(),\n wait_after_up=fab_conf[\"docker\"][\"sleep_after_up\"].get(),\n )\n\n ############################################################################\n # setup in_progress JSON #\n ############################################################################\n if not fab_conf[\"skip_progress_file\"]:\n json_progress_identifier = \"IN_PROGRESS-\"\n\n if fab_json_path.name.startswith(json_progress_identifier):\n in_progress_json = fab_json_path\n else:\n in_progress_json = fab_json_path.with_name(\n json_progress_identifier + fab_json_path.name\n )\n\n ############################################################################\n # Fabrication loop #\n ############################################################################\n\n to_place = setup_fab_data(clay_bullets)\n\n if not questionary.confirm(\"Ready to start program?\").ask():\n log.critical(\"Program exited because user didn't confirm start.\")\n print(\"Exiting.\")\n sys.exit()\n\n # Set speed, accel, tool, wobj and move to start pos\n pre_procedure(abb)\n\n for bullet in to_place:\n bullet.placed = None\n bullet.cycle_time = None\n\n for i, bullet in enumerate(to_place):\n current_bullet_desc = \"Bullet {:03}/{:03} with id {}.\".format(\n i, len(to_place) - 1, bullet.bullet_id\n )\n\n abb.send(PrintText(current_bullet_desc))\n log.info(current_bullet_desc)\n\n pick_frame = pick_station.get_next_frame(bullet)\n\n # Pick bullet\n pick_future = pick_bullet(abb, pick_frame)\n\n # Place bullet\n place_future = place_bullet(abb, bullet)\n\n bullet.placed = 1 # set placed to temporary value to mark it as \"placed\"\n\n # Write progress to json while waiting for robot\n if not fab_conf[\"skip_progress_file\"].get():\n with in_progress_json.open(mode=\"w\") as fp:\n json.dump(clay_bullets, fp, cls=ClayBulletEncoder)\n log.debug(\"Wrote clay_bullets to {}\".format(in_progress_json.name))\n\n # This blocks until cycle is finished\n cycle_time = 
pick_future.result() + place_future.result()\n\n bullet.cycle_time = cycle_time\n log.debug(\"Cycle time was {}\".format(bullet.cycle_time))\n bullet.placed = time.time()\n log.debug(\"Time placed was {}\".format(bullet.placed))\n\n ############################################################################\n # Shutdown procedure #\n ############################################################################\n\n # Write progress of last run of loop\n if not fab_conf[\"skip_progress_file\"].get():\n with in_progress_json.open(mode=\"w\") as fp:\n json.dump(clay_bullets, fp, cls=ClayBulletEncoder)\n log.debug(\"Wrote clay_bullets to {}\".format(in_progress_json.name))\n\n if (\n len([bullet for bullet in clay_bullets if bullet.placed is None]) == 0\n and not fab_conf[\"skip_progress_file\"].get()\n ):\n done_file_name = fab_json_path.name.replace(json_progress_identifier, \"\")\n done_json = fab_conf[\"paths\"][\"json_dir\"].as_path() / \"00_done\" / done_file_name\n\n in_progress_json.rename(done_json)\n\n with done_json.open(mode=\"w\") as fp:\n json.dump(clay_bullets, fp, cls=ClayBulletEncoder)\n\n log.debug(\"Saved placed bullets to 00_Done.\")\n elif not fab_conf[\"skip_progress_file\"].get():\n log.debug(\n \"Bullets without placed timestamp still present, keeping {}\".format(\n in_progress_json.name\n )\n )\n\n log.info(\"Finished program with {} bullets.\".format(len(to_place)))\n\n post_procedure(abb)",
"def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')",
"def test_fuel_ccp_containerized(\n self, underlay, config, k8scluster, ccp_actions, show_step):\n\n ccp_actions.default_params = settings.CCP_CLI_PARAMS\n show_step(2)\n ccp_actions.fetch_ccp()\n ccp_actions.dockerize_ccp()\n\n ccp_actions.put_yaml_config(\n path=settings.CCP_DEPLOY_CONFIG,\n config=settings.CCP_DEFAULT_GLOBALS)\n ccp_actions.put_yaml_config(\n path=settings.CCP_SOURCES_CONFIG,\n config=settings.CCP_BUILD_SOURCES)\n ccp_actions.put_yaml_config(\n path=settings.CCP_FETCH_CONFIG,\n config=settings.CCP_FETCH_PARAMS)\n\n with open(config.ccp_deploy.topology_path, 'r') as f:\n ccp_actions.put_raw_config(\n path=settings.CCP_DEPLOY_TOPOLOGY,\n content=f.read())\n\n ccp_actions.init_default_config(include_files=[\n settings.CCP_DEPLOY_CONFIG,\n settings.CCP_SOURCES_CONFIG,\n settings.CCP_DEPLOY_TOPOLOGY,\n settings.CCP_FETCH_CONFIG])\n config.ccp.os_host = config.k8s.kube_host\n\n if settings.REGISTRY == \"127.0.0.1:31500\":\n k8scluster.create_registry()\n ccp_actions.build()\n show_step(3)\n ccp_actions.deploy()\n post_os_deploy_checks.check_jobs_status(k8scluster.api)\n post_os_deploy_checks.check_pods_status(k8scluster.api)\n show_step(4)\n remote = underlay.remote(host=config.k8s.kube_host)\n underlay.sudo_check_call(\"pip install python-openstackclient\",\n host=config.k8s.kube_host)\n remote.check_call(\n \"source openrc-{0}; bash fuel-ccp/tools/deploy-test-vms.sh -k {0}\"\n \" -a create\".format(\n settings.CCP_CONF[\"kubernetes\"][\"namespace\"]),\n timeout=600)",
"def test_redeploy_container_asset(self):\n pass",
"def main():\n\n # get AWS credentials\n aws_credentials = read_aws_credentials()\n access_key_id = aws_credentials['access_key_id']\n secret_access_key = aws_credentials['secret_access_key']\n aws_region = aws_credentials['region']\n\n # build Docker image\n docker_client = docker.from_env()\n image, build_log = docker_client.images.build(\n path='.', tag=LOCAL_REPOSITORY, rm=True)\n\n # get AWS ECR login token\n ecr_client = boto3.client(\n 'ecr', aws_access_key_id=access_key_id, \n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecr_credentials = (\n ecr_client\n .get_authorization_token()\n ['authorizationData'][0])\n\n ecr_username = 'AWS'\n\n ecr_password = (\n base64.b64decode(ecr_credentials['authorizationToken'])\n .replace(b'AWS:', b'')\n .decode('utf-8'))\n\n ecr_url = ecr_credentials['proxyEndpoint']\n\n # get Docker to login/authenticate with ECR\n docker_client.login(\n username=ecr_username, password=ecr_password, registry=ecr_url)\n\n # tag image for AWS ECR\n ecr_repo_name = '{}/{}'.format(\n ecr_url.replace('https://', ''), LOCAL_REPOSITORY)\n\n image.tag(ecr_repo_name, tag='latest')\n\n # push image to AWS ECR\n push_log = docker_client.images.push(ecr_repo_name, tag='latest')\n\n # force new deployment of ECS service\n ecs_client = boto3.client(\n 'ecs', aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecs_client.update_service(\n cluster=ECS_CLUSTER, service=ECS_SERVICE, forceNewDeployment=True)\n\n return None",
"def main():\n with open('config.json') as config_file:\n configs = json.load(config_file)\n\n jar_list = utilities.upload_jars(configs)\n utilities.sign_jars(configs)\n\n artifact_folder = utilities.prepare_artifacts(configs, jar_list)\n\n repo_id = utilities.create_staging_repo(configs)\n utilities.deploy_to_staging_repo(configs, artifact_folder, repo_id)\n utilities.close_staging_repo(configs, repo_id)",
"def _execute_container(self):\n pass",
"def submit_dag(self):\n os.chdir(self.production.rundir)\n os.system(\"cat *_local.cache > local.cache\")\n\n for psdfile in self.production.get_psds(\"xml\"):\n ifo = psdfile.split(\"/\")[-1].split(\"_\")[1].split(\".\")[0]\n os.system(f\"cp {psdfile} {ifo}-psd.xml.gz\")\n\n\n self.before_submit()\n \n try:\n command = [\"condor_submit_dag\", \n \"-batch-name\", f\"rift/{self.production.event.name}/{self.production.name}\",\n os.path.join(self.production.rundir, \"marginalize_intrinsic_parameters_BasicIterationWorkflow.dag\")]\n dagman = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n self.logger.info(command, production = self.production)\n except FileNotFoundError as error:\n raise PipelineException(\"It looks like condor isn't installed on this system.\\n\"\n f\"\"\"I wanted to run {\" \".join(command)}.\"\"\")\n\n stdout, stderr = dagman.communicate()\n\n\n if \"submitted to cluster\" in str(stdout):\n cluster = re.search(\"submitted to cluster ([\\d]+)\", str(stdout)).groups()[0]\n self.production.status = \"running\"\n self.production.job_id = int(cluster)\n return cluster, PipelineLogger(stdout)\n else:\n raise PipelineException(f\"The DAG file could not be submitted.\\n\\n{stdout}\\n\\n{stderr}\",\n issue=self.production.event.issue_object,\n production=self.production.name)",
"def placement_route_artifact(args):\n clarity_epp.placement.artifact.route_to_workflow(lims, args.process_id, args.workflow)",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def launch_container(self, service_info):\n user, instance = _get_user_and_instance(\n self.girder_client, service_info['instanceId'])\n tale = self.girder_client.get(f\"/tale/{service_info['taleId']}\")\n\n self.job_manager.updateProgress(\n message='Starting container', total=LAUNCH_CONTAINER_STEP_TOTAL,\n current=1, forceFlush=True)\n\n print(\"Launching container for a Tale...\")\n if 'imageInfo' not in tale:\n\n # Wait for image to be built\n tic = time.time()\n timeout = 180.0\n time_interval = 5\n\n while time.time() - tic < timeout:\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n if 'imageInfo' in tale and 'digest' in tale['imageInfo']:\n break\n msg = f\"Waiting for image build to complete. ({time_interval}s)\"\n logging.info(msg)\n print(msg)\n time.sleep(5)\n\n container_config = _get_container_config(self.girder_client, tale)\n service, attrs = _launch_container(service_info, container_config)\n print(\n f\"Started a container using volume: {service_info['volumeName']} \"\n f\"on node: {service_info['nodeId']}\"\n )\n\n # wait until task is started\n tic = time.time()\n timeout = 300.0\n started = False\n\n print(\"Waiting for the environment to be accessible...\")\n while time.time() - tic < timeout:\n try:\n status = service.tasks()[0]['Status']\n\n if status['State'] in {\"failed\", \"rejected\"}:\n raise ValueError(\"Failed to start environment: %s\" % status['Err'])\n elif status['State'] == \"running\":\n started = True\n break\n\n except IndexError:\n started = False\n\n time.sleep(0.2)\n\n if not started:\n raise ValueError(\"Tale did not start before timeout exceeded\")\n\n print(\"Environment is up and running.\")\n self.job_manager.updateProgress(\n message='Container started', total=LAUNCH_CONTAINER_STEP_TOTAL,\n current=LAUNCH_CONTAINER_STEP_TOTAL, forceFlush=True)\n\n service_info.update(attrs)\n service_info['name'] = service.name\n return service_info",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? \".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))",
"def deploy():\n build()\n copy()\n install()",
"def test_bm_deploy(self, config, underlay,\n openstack_deployed,\n tempest_actions):\n openstack_deployed._salt.local(\n tgt='*', fun='cmd.run',\n args='service ntp stop; ntpd -gq; service ntp start')\n\n if settings.RUN_TEMPEST:\n tempest_actions.prepare_and_run_tempest()\n LOG.info(\"*************** DONE **************\")",
"def test_bm_deploy(self, config, underlay,\n openstack_deployed,\n tempest_actions):\n openstack_deployed._salt.local(\n tgt='*', fun='cmd.run',\n args='service ntp stop; ntpd -gq; service ntp start')\n\n if settings.RUN_TEMPEST:\n tempest_actions.prepare_and_run_tempest()\n LOG.info(\"*************** DONE **************\")",
"def process(self, container):\n pass;",
"def setup_for_execution_testcase(self, testcase_dependencies):\n os.chdir(self.tmp_work)\n for container in self.containers:\n self._setup_single_directory_for_execution(container.directory, testcase_dependencies)\n self._run_pre_commands(container.directory)\n\n # Copy in the submitty_router if necessary.\n if container.import_router:\n router_path = os.path.join(self.tmp_autograding, \"bin\", \"submitty_router.py\")\n self.log_message(f\"COPYING:\\n\\t{router_path}\\n\\t{container.directory}\")\n shutil.copy(router_path, container.directory)\n autograding_utils.add_all_permissions(container.directory)",
"def child_case():\n result = ObjectContainer()\n\n flow1 = Flow(\"flow1\")\n result.flow1 = flow1\n\n with flow1.add_container(\"container1\") as container1:\n result.container1 = container1\n with container1.add_task(\"task1\") as task1:\n result.task1 = task1\n with container1.add_task(\"task2\") as task2:\n result.task2 = task2\n\n flow1.requeue()\n\n return result",
"def runContainerCluster(towerVersion, osVersion, namingConvention, stream=True, **kwargs):\n # runContainerCluster() defaults; can be overriden via **kwargs\n externalPort = None\n containerCount = 3\n debug = True\n loadBalance = False\n\n # Optional debug that prints a dict of options\n if debug:\n runClusterOpts = dict(towerVersion=towerVersion, osVersion=osVersion, loadBalance=loadBalance, namingConvention=namingConvention, externalPort=externalPort, containerCount=containerCount, debug=debug)\n print(runClusterOpts)\n\n # Check to see if specified towerVersion has image built\n check = towerdev.utilities.imageCheck(towerVersion)\n\n # How we proceed with imageCheck() return\n if check is False:\n print(color(\"ERROR: Deployment of container cluster failed. Please make sure the specified version of Tower has an image built.\", fg=\"red\"))\n return False\n else:\n for c in range(containerCount):\n runTowerContainer(towerVersion=towerVersion, externalPort=externalPort, osVersion=osVersion, containerName=\"{0}-{1}\".format(namingConvention,c))\n\n clusterContainers = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n containerIps = []\n\n # Gather container IPs for inventory fillout\n for c in range(len(clusterContainers)):\n containerIp = clusterContainers[c].attrs['NetworkSettings']['IPAddress']\n containerIps.append(containerIp)\n\n print(clusterContainers[0])\n\n # Choose inventory file based on towerVersion\n if \"3.5\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.5.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif \"3.6\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.6.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif \"3.7\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.7.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif \"3.8\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.8.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n\n # Choose messaging backend based on towerVersion\n if \"3.5\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n elif \"3.6\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n elif \"3.7\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n elif \"3.8\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = 
clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n\n # Call ./setup.sh from first container in list\n setupCmd = '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh\"'.format(towerVersion)\n setupLbCmd = '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true\"'.format(towerVersion)\n inventoryDbVersion = towerVersion.replace(\".\", \"\")\n modifyInventoryDbCmd = \"sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory\".format(inventoryDbVersion, towerVersion)\n runDatabaseCmd = clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)\n\n if loadBalance:\n print(color(\"INFO: Running ./setup.sh with load balance configuration...\", fg=\"yellow\"))\n\n # Stream output based on option\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container=\"{0}-{1}\".format(namingConvention, calcRunContainer), cmd=setupLbCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'], stream=True, detach=False)\n\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n\n containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n\n if setupCmdCode is not 0:\n clusterStatus = False\n\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupLbCmd)\n\n else:\n print(color(\"INFO: Running ./setup.sh with no load balance configuration...\", fg=\"yellow\"))\n\n # Stream output based on option\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container=\"{0}-{1}\".format(namingConvention, calcRunContainer), cmd=setupCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'], stream=True, detach=False)\n\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n\n if setupCmdCode is not 0:\n clusterStatus = False\n\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupCmd)\n containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n\n if runSetupCmd[0] is not 0:\n clusterStatus = False\n\n return clusterStatus",
"def run(self, container_config: ContainerConfig) -> Container:",
"def deploy(self, topology):\n print \"ABC - Deployer.deploy()\"",
"def deploy(tag,num, names):\n len=0\n click.echo('*** DEPLOYMENT IS INITIATED\\n')\n for name in names:\n len+=1\n if (num==len):\n for name in names:\n try:\n response_container = client.containers.run(tag, name=name, detach=True)\n container_id = response_container.id\n index = slice(12)\n click.echo(\"Container \"+container_id[index]+\" was created\")\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n click.echo(message)\n elif(len==1):\n for x in range(num):\n try:\n response_container = client.containers.run(tag, name=name+\"_\"+str(random.randrange(0,1000)), detach=True)\n container_id = response_container.id\n index = slice(12)\n click.echo(\"Container \"+container_id[index]+\" was created\")\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n click.echo(message)\n else:\n click.echo(\"\\n You have to give the same nummber of containers for generation and names OR a number of containers and one name\")",
"def main():\n # This have specific paths to prevent abitrary binaries from being\n # executed. The \"gsi\"* utilities are configured to use either grid proxies\n # or ssh, automatically.\n remoteLoginCmd = \"/usr/bin/gsissh\"\n remoteCopyCmd = \"/usr/bin/gsiscp\"\n\n UNKNOWN_PLATFORM_EXIT_CODE = 10\n MISSING_PBS_CONFIG_EXIT_CODE = 20\n\n p = AllocatorParser(sys.argv[0])\n platform = p.getPlatform()\n\n creator = Allocator(platform, p.getArgs(), \"$HOME/.lsst/condor-info.py\")\n\n platformPkgDir = lsst.utils.getPackageDir(\"ctrl_platform_\"+platform)\n configName = os.path.join(platformPkgDir, \"etc\", \"config\", \"pbsConfig.py\")\n execConfigName = os.path.join(platformPkgDir, \"etc\", \"config\", \"execConfig.py\")\n\n creator.load(execConfigName)\n\n creator.loadPbs(configName)\n\n verbose = creator.isVerbose()\n \n pbsName = os.path.join(platformPkgDir, \"etc\", \"templates\", \"generic.pbs.template\")\n generatedPbsFile = creator.createPbsFile(pbsName)\n\n condorFile = os.path.join(platformPkgDir, \"etc\", \"templates\", \"glidein_condor_config.template\")\n generatedCondorConfigFile = creator.createCondorConfigFile(condorFile)\n\n scratchDirParam = creator.getScratchDirectory()\n template = Template(scratchDirParam)\n scratchDir = template.substitute(USER_HOME=creator.getUserHome())\n userName = creator.getUserName()\n \n hostName = creator.getHostName()\n\n utilityPath = creator.getUtilityPath()\n\n #\n # execute copy of PBS file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedPbsFile, userName, hostName, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute copy of Condor config file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedCondorConfigFile, userName, hostName, scratchDir, os.path.basename(generatedCondorConfigFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute qsub command on XSEDE node to perform Condor glide-in\n #\n cmd = \"%s %s@%s %s/qsub %s/%s\" % (remoteLoginCmd, userName, hostName, utilityPath, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteLoginCmd, hostName)\n sys.exit(exitCode)\n\n nodes = creator.getNodes()\n slots = creator.getSlots()\n wallClock = creator.getWallClock()\n nodeString = \"\"\n if int(nodes) > 1:\n nodeString = \"s\"\n print \"%s node%s will be allocated on %s with %s slots per node and maximum time limit of %s\" % (nodes, nodeString, platform, slots, wallClock)\n print \"Node set name:\"\n print creator.getNodeSetName()\n sys.exit(0)"
]
| [
"0.617238",
"0.58995914",
"0.5847137",
"0.58073807",
"0.55927074",
"0.5573862",
"0.5552047",
"0.5548372",
"0.55465376",
"0.5510823",
"0.5471285",
"0.54646164",
"0.5461811",
"0.54609716",
"0.54609716",
"0.54609716",
"0.5436042",
"0.5430517",
"0.54251856",
"0.542289",
"0.54116124",
"0.54116124",
"0.53921574",
"0.53886014",
"0.5385823",
"0.5342366",
"0.5332394",
"0.53282225",
"0.5327888",
"0.5321336"
]
| 0.6592639 | 0 |
Function to retrieve a person from the database to store in client model | def get_person(self, id):
PERSON = """SELECT name FROM Person
WHERE id = %s"""
ret = None
try:
self.db_cursor.execute("""SELECT name, id FROM Person WHERE id = %s""", (id,))
self.db_cursor.execute(PERSON, (id,))
self.db_connection.commit()
p_attribs = self.db_cursor.fetchall()
ret = Person()
ret.name = p_attribs[0][0]
ret.id = id
except:
logging.warning("DBAdapter: Error- cannot retrieve person: " + str(id))
return None
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def select_person():\r\n body = request.get_json()\r\n\r\n try:\r\n SELECT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n message = get_person(sqlite_client, body.get('id'))\r\n\r\n return jsonify({'name': message[0][1], 'cpf': message[0][2]})",
"def read_person(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person WHERE personid =?\", (person_id,))\n _person = None\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n conn.close()\n return _person\n except:\n return None",
"def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person",
"def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person",
"def fetch_by_id(self, person_id: int) -> PersonModel:\n person_db_model = PersonDBModel.query.get(person_id)\n if not person_db_model:\n raise PersonNotFound(person_id)\n person = PersonModel.from_db(person_db_model)\n self.logger.info(f'Successfully fetched Person {person.first_name} {person.last_name} by ID {person_id}')\n return person",
"def get_person(request):\n\n email = request.args.get(\"email\", None, str)\n # log_info(\"email is \" + email)\n\n if not email:\n log_info(\"get_person was called, but no email was provided in request\")\n return None\n\n if validators.email(email) and (email_requester := auth.check_teacher(request)):\n if email_requester and validators.email(email_requester):\n db = database.Database()\n student = db.get_student(email)\n return dict(student)\n\n elif validators.email(email) and (email_requester := auth.check_login(request)):\n if email_requester and validators.email(email_requester) and email == email_requester:\n db = database.Database()\n student = db.get_student(email)\n if 'notes' in student:\n del student['notes']\n\n return dict(student)\n\n log_info(\"No person with email \" + email + \" found in database\")\n return None",
"def get_person(self, id):\n try:\n person = Person.get(Person.id == id)\n data = model_to_dict(person)\n except DoesNotExist:\n response.status = 404\n data = \"Not found\"\n return dict(name='Person', data=data)",
"def retrieve_from_db(self):\n pass",
"def get_one(self, index, *args, **kw):\n person = M.People.query.get(index=index)\n log.debug('person {}'.format(person))\n if(person):\n kw['_id'] = person._id\n return super(PeopleAPIController, self).get_one(*args, **kw)",
"def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)",
"def get_person(self, user_id):\n endpoint = '/user/{}'.format(user_id)\n return self.get_request(endpoint)",
"def get_object(self):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user=self.request.user)\n return p",
"def person(self, person_id):\r\n return persons.Person(self, person_id)",
"def get_new_entry(in_person):\n person = Person()\n person.name = get_name()\n person.phone = input(\"Enter Phone Number: \")\n return person",
"def get_person(request, person_id):\n person = get_object_or_404(Person, pk=person_id)\n\n\n return render_to_response('people/person_detail.html', {\n 'person': person,\n })",
"def read_one(lname):\n # Does the person exist in people?\n if lname in PEOPLE:\n person = PEOPLE.get(lname)\n\n # otherwise, nope, not found\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )\n\n return person",
"def get_object(self,pk):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user_id=pk)\n return p",
"def get_person(self, requestId):\n return self.get_json('/verification/%s/person' % str(requestId))",
"def get(self, email):\n adm = Administration()\n pers = adm.get_person_by_google_mail(email)\n return pers",
"def person(request, pk):\r\n person = get_object_or_404(Person, pk=pk)\r\n return HttpResponse('Person: %s' % person)",
"def current_person(self):\n d = self.people_table_data[self.row_i]\n\n # \"fullname\", \"lunaid\", \"age\", \"dob\", \"sex\", \"lastvisit\", \"maxdrop\", \"studies\",\n info = dict(zip(self.person_columns, d))\n info[\"pid\"] = d[8] # pid not shown\n\n # dont get fname and lname from table\n # could word split, but need to be accurate at least for edit module\n if self.sql:\n res = self.sql.query.get_name(pid=info[\"pid\"])\n info[\"fname\"] = res[0][0]\n info[\"lname\"] = res[0][1]\n return info\n # # main model\n # self.checkin_button.setEnabled(False)\n # print('people table: subject selected: %s' % d[8])\n # self.render_person(pid=d[8], fullname=d[0], age=d[2],\n # sex=d[4], lunaid=d[1])\n # self.render_schedule(ScheduleFrom.PERSON)",
"def get_people(self):\n cursor = self.cur()\n cursor.execute('SELECT * FROM {tn} '.format(tn=\"person\"))\n all_people = cursor.fetchall()\n return all_people",
"def getByName( self, people_name ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `name` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_name ) )\n person = Mysql.ex( qry )\n if len( person ) == 0:\n return False\n return person[0]",
"def find(self, person):\n page = self.find_page(person)\n try:\n entity_id = self.get_entity_id(page.title)\n entity = self.get_entity(entity_id)\n person.dob = self.get_birthday(entity)\n person.occupation = self.get_occupation(entity)\n person.nationality = self.get_country_of_citizenship(entity)\n res_domicile = self.get_domicile(entity)\n if res_domicile:\n person.domicile = res_domicile\n elif person.nationality == self.get_birthcountry(entity):\n person.domicile = person.nationality # this is an assumption!\n birth_name = self.get_birth_name(entity)\n person.middle_name = self.get_middle_name(birth_name, person)\n if page:\n person.is_famous = 'True'\n else:\n person.is_famous = ''\n person.net_worth = self.get_networth(entity)\n person.description = page.summary\n person.set_raw()\n except:\n pass",
"def read_people():\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person LIMIT {0};\".format(settings.search_result_row_limit))\n p = []\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n p.append(_person)\n conn.close()\n return p\n except:\n return []",
"def fetch(self, person=None):\n if not person:\n # get the list.\n self.endpoint = 'people.json'\n else:\n self.endpoint = 'people/{0}.json'.format(person)\n\n request = self.get(self.construct_url())\n\n if request.status_code == 200:\n return json.loads(request.content)\n\n raise BasecampAPIError()",
"def get_person_name(self, person_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None",
"def get(cls, external_id, local_user_id, provider_name, db_session=None):\n db_session = get_db_session(db_session)\n return db_session.query(cls.model).get(\n [external_id, local_user_id, provider_name]\n )",
"def select_person_by_id(conn, person_id):\n sql = \"\"\"SELECT * FROM person WHERE id=?\"\"\"\n cur = conn.cursor()\n try:\n cur.execute(sql, (person_id,))\n data = cur.fetchall()\n if data:\n userid = (data[0][0])\n print \"\\nQuerying for userID {}\\n\".format(userid)\n print sql_pp(cur, data)\n except OperationalError, msg:\n print \"SQL error {} while running our code\".format(msg)",
"def viewOne(id):\n print(inspect.stack()[1][3])\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to find the given client'}\n return list_to_json(ResultSet)"
]
| [
"0.71754307",
"0.68746483",
"0.6805197",
"0.68045425",
"0.6589222",
"0.6520086",
"0.6469705",
"0.63906187",
"0.63485867",
"0.62637526",
"0.6231145",
"0.6229764",
"0.61607337",
"0.6155425",
"0.6108049",
"0.60942554",
"0.6072058",
"0.5982313",
"0.5974648",
"0.5969352",
"0.5954478",
"0.59223187",
"0.5899272",
"0.58853996",
"0.5865698",
"0.5855374",
"0.5839932",
"0.5839431",
"0.5827672",
"0.58102524"
]
| 0.69603175 | 1 |
Function to determine if the list of courses is in the general courses table | def validate_new_curriculum_courses(self, curriculum_courses):
for cur in curriculum_courses:
# check to make sure its in the general courses table
self.db_cursor.execute("""SELECT COUNT(*) FROM Course WHERE name = %s""", (cur,))
ct = self.db_cursor.fetchone()
ct = ct[0]
if ct == 0:
print("course does not exist, we must create new one or cancel") # todo
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_course_exists(self):\n db = Course._file.read_db()\n courses = db[\"courses\"]\n for crs in courses:\n if crs[\"course_name\"] == self._course_name:\n return True\n break\n return False",
"def course_in_courses(self, mnemo, courses):\n for course_id, course in enumerate(courses):\n if mnemo in course.values():\n # tuple is returned here, so that converting to bool with id = 0 result was True\n return course_id,\n return False",
"def check_course_available(data, course):\n for i in range(len(data['menu']['meal']['course'])):\n for key, value in data['menu']['meal']['course'][i].items():\n if key == 'name':\n if value.upper() == course.upper():\n return True\n return False",
"def course_tester(courses):\n\n return False",
"def validate_course(self, id_curso):\n from openedx.core.djangoapps.content.course_overviews.models import CourseOverview\n try:\n aux = CourseKey.from_string(id_curso)\n return CourseOverview.objects.filter(id=aux).exists()\n except InvalidKeyError:\n return False",
"def test_has_course(self):\r\n check_has_course_method(\r\n XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple']),\r\n SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),\r\n locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS\r\n )",
"def __contains__(self,x):\n dbg('_gradelist[..].__contains__(',x,')')\n nems = self._grades #assuming x is a Grade\n if not isinstance(x,Grade): \n #probably looking for a name!\n nems = [q.name for q in self._grades]\n dbg('_gradelist[..].__contains__.nems = ',*nems)\n for i in nems:\n if x==i:\n return True\n return False",
"def __contains__(self, nom_canal):\n return nom_canal in dict(self._canaux)",
"def course_contains(original, courses):\n for name in courses:\n compiler = re.compile(name)\n if compiler.search(original) is not None:\n return name\n return None",
"def has_course_navigator(self, resp):\r\n self.assertContains(resp, \"Course Info\")\r\n self.assertContains(resp, \"courseware\")",
"def contains_comp (x):\r\n\r\n for comp in COMPTERMS:\r\n if comp in x:\r\n return True\r\n return False",
"def test_no_such_course(self):\r\n for course_key in [\r\n\r\n SlashSeparatedCourseKey(*fields)\r\n for fields in [\r\n ['edX', 'simple', 'no_such_course'], ['edX', 'no_such_course', '2012_Fall'],\r\n ['NO_SUCH_COURSE', 'Test_iMport_courSe', '2012_Fall'],\r\n ]\r\n ]:\r\n course = self.store.get_course(course_key)\r\n assert_is_none(course)\r\n assert_false(self.store.has_course(course_key))\r\n mix_cased = SlashSeparatedCourseKey(\r\n course_key.org.lower(), course_key.course.upper(), course_key.run.upper()\r\n )\r\n assert_false(self.store.has_course(mix_cased))\r\n assert_false(self.store.has_course(mix_cased, ignore_case=True))",
"def isCourseObject(self):\n return 'Course' == self.ecparent.Type()",
"def contains_comp (x):\r\n\r\n for comp in COMPTERMS:\r\n if comp in x:\r\n return True\r\n return False",
"def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])",
"def has_course(self, course_key, ignore_case=False):\r\n assert(isinstance(course_key, SlashSeparatedCourseKey))\r\n location = course_key.make_usage_key('course', course_key.run)\r\n if ignore_case:\r\n course_query = location.to_deprecated_son('_id.')\r\n for key in course_query.iterkeys():\r\n if isinstance(course_query[key], basestring):\r\n course_query[key] = re.compile(r\"(?i)^{}$\".format(course_query[key]))\r\n else:\r\n course_query = {'_id': location.to_deprecated_son()}\r\n return self.collection.find_one(course_query, fields={'_id': True}) is not None",
"def contained_in_order(cls, order, course_id):\r\n return course_id in [item.paidcourseregistration.course_id\r\n for item in order.orderitem_set.all().select_subclasses(\"paidcourseregistration\")]",
"def is_course_cohorted(course_key):\r\n return courses.get_course_by_id(course_key).is_cohorted",
"def attended_college(df):\n\n # Checking to see if the posting requires a college degree\n if df.degree == 'NONE' or df.degree == 'HIGH_SCHOOL':\n return False\n else:\n return True",
"def is_enrolled(self, course):\n return bool(\n self.enrolled_courses_dict.get(course.get_namespace_name()))",
"def check_db():\n \n with open(\"courses_2016.json\") as data:\n data = data.read()\n\n courses = json.loads(data)\n course_keys_in_db = Course.query().fetch(keys_only=True)\n\n db_list = []\n failures = []\n\n for course in course_keys_in_db:\n db_list.append(course.id())\n failures = [i for i in courses if i.replace(\" \",\"\") not in db_list]\n\n return failures",
"def test_xml_get_courses(self):\r\n self.initdb('direct')\r\n courses = self.store.modulestores['xml'].get_courses()\r\n self.assertEqual(len(courses), 2)\r\n course_ids = [course.id for course in courses]\r\n self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, course_ids)\r\n self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, course_ids)\r\n # this course is in the directory from which we loaded courses but not in the map\r\n self.assertNotIn(\"edX/toy/TT_2012_Fall\", course_ids)",
"def can_place(self, course: 'Course'):\n if type(course) != Course:\n raise TypeError(\"Cannot use argument of type {} in list of type <class 'Course'>.\".format(type(course)))\n\n key = course.get_course_code()\n\n # Check if the course already exists and can only be taken once\n if key in self._courses and not course.multi:\n return False\n\n # Check credit load before adding\n if self._total_load + course.credit_load > self._max_load + self._overload_cap:\n return False\n\n # Check difficulty rating before adding\n if (self._total_diff + course.difficulty) / (len(self._courses) + 1) > self._max_diff:\n return False\n\n return True",
"def is_course_full(cls, course):\r\n is_course_full = False\r\n if course.max_student_enrollments_allowed is not None:\r\n is_course_full = cls.num_enrolled_in(course.id) >= course.max_student_enrollments_allowed\r\n return is_course_full",
"def check(indivs, geno_list):\r\n\tfor i in xrange(0,len(indivs)):\r\n\t\tif indivs[i] not in geno_list:\r\n\t\t\t# print \"this is not in: \"+ indivs[i]\r\n\t\t\treturn False\r\n\treturn True",
"def __contains__(self, key: str) -> bool:\n return key in self.tables",
"def has_students(self):\n\n students = Student.query.filter_by(class_id=self.id).first()\n link_1 = None\n link_2 = None\n if self.student_on_class != None:\n link_1 = Student.query.filter_by(student_on_class=self.student_on_class).all()\n if self.student_on_class2 != None:\n link_1 = Student.query.filter_by(student_on_class2=self.student_on_class2).all()\n if students != None or link_1 != None or link_2 != None:\n return True\n else:\n return False",
"def has_table(self, table):\n return table in self.get_table_list(\".\" in table)",
"def table_check(tablename, path):\n instance = arcno(path)\n tablelist = [i for i,j in instance.actual_list.items()]\n return True if tablename in tablelist else False",
"def _has_access_course_desc(user, action, course):\r\n def can_load():\r\n \"\"\"\r\n Can this user load this course?\r\n\r\n NOTE: this is not checking whether user is actually enrolled in the course.\r\n \"\"\"\r\n # delegate to generic descriptor check to check start dates\r\n return _has_access_descriptor(user, 'load', course, course.id)\r\n\r\n def can_load_forum():\r\n \"\"\"\r\n Can this user access the forums in this course?\r\n \"\"\"\r\n return (\r\n can_load() and\r\n (\r\n CourseEnrollment.is_enrolled(user, course.id) or\r\n _has_staff_access_to_descriptor(user, course, course.id)\r\n )\r\n )\r\n\r\n def can_enroll():\r\n \"\"\"\r\n First check if restriction of enrollment by login method is enabled, both\r\n globally and by the course.\r\n If it is, then the user must pass the criterion set by the course, e.g. that ExternalAuthMap\r\n was set by 'shib:https://idp.stanford.edu/\", in addition to requirements below.\r\n Rest of requirements:\r\n Enrollment can only happen in the course enrollment period, if one exists.\r\n or\r\n\r\n (CourseEnrollmentAllowed always overrides)\r\n (staff can always enroll)\r\n \"\"\"\r\n # if using registration method to restrict (say shibboleth)\r\n if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD') and course.enrollment_domain:\r\n if user is not None and user.is_authenticated() and \\\r\n ExternalAuthMap.objects.filter(user=user, external_domain=course.enrollment_domain):\r\n debug(\"Allow: external_auth of \" + course.enrollment_domain)\r\n reg_method_ok = True\r\n else:\r\n reg_method_ok = False\r\n else:\r\n reg_method_ok = True #if not using this access check, it's always OK.\r\n\r\n now = datetime.now(UTC())\r\n start = course.enrollment_start\r\n end = course.enrollment_end\r\n\r\n if reg_method_ok and (start is None or now > start) and (end is None or now < end):\r\n # in enrollment period, so any user is allowed to enroll.\r\n debug(\"Allow: in enrollment period\")\r\n return True\r\n\r\n # if user is in CourseEnrollmentAllowed with right course key then can also enroll\r\n # (note that course.id actually points to a CourseKey)\r\n # (the filter call uses course_id= since that's the legacy database schema)\r\n # (sorry that it's confusing :( )\r\n if user is not None and user.is_authenticated() and CourseEnrollmentAllowed:\r\n if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course.id):\r\n return True\r\n\r\n # otherwise, need staff access\r\n return _has_staff_access_to_descriptor(user, course, course.id)\r\n\r\n def see_exists():\r\n \"\"\"\r\n Can see if can enroll, but also if can load it: if user enrolled in a course and now\r\n it's past the enrollment period, they should still see it.\r\n\r\n TODO (vshnayder): This means that courses with limited enrollment periods will not appear\r\n to non-staff visitors after the enrollment period is over. 
If this is not what we want, will\r\n need to change this logic.\r\n \"\"\"\r\n # VS[compat] -- this setting should go away once all courses have\r\n # properly configured enrollment_start times (if course should be\r\n # staff-only, set enrollment_start far in the future.)\r\n if settings.FEATURES.get('ACCESS_REQUIRE_STAFF_FOR_COURSE'):\r\n # if this feature is on, only allow courses that have ispublic set to be\r\n # seen by non-staff\r\n if course.ispublic:\r\n debug(\"Allow: ACCESS_REQUIRE_STAFF_FOR_COURSE and ispublic\")\r\n return True\r\n return _has_staff_access_to_descriptor(user, course, course.id)\r\n\r\n return can_enroll() or can_load()\r\n\r\n checkers = {\r\n 'load': can_load,\r\n 'load_forum': can_load_forum,\r\n 'enroll': can_enroll,\r\n 'see_exists': see_exists,\r\n 'staff': lambda: _has_staff_access_to_descriptor(user, course, course.id),\r\n 'instructor': lambda: _has_instructor_access_to_descriptor(user, course, course.id),\r\n }\r\n\r\n return _dispatch(checkers, action, user, course)"
]
| [
"0.63712996",
"0.6357452",
"0.6347225",
"0.6346391",
"0.62091684",
"0.60255164",
"0.5831977",
"0.57686985",
"0.5721567",
"0.56778044",
"0.56285805",
"0.561697",
"0.5616078",
"0.56133604",
"0.5596482",
"0.5576949",
"0.55759954",
"0.5570297",
"0.556247",
"0.5550211",
"0.5531584",
"0.55084616",
"0.54703313",
"0.5448987",
"0.5414071",
"0.53873974",
"0.53868526",
"0.5375108",
"0.53728807",
"0.5357219"
]
| 0.63618577 | 1 |
Function to determine if a person with the same id as a new person already exists in the database | def validate_new_person(self, person_id):
self.db_cursor.execute("""SELECT COUNT(*) FROM Person WHERE id == %s""", (person_id,))
ct = self.db_cursor.fetchone()
ct = ct[0]
if ct == 0:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def case_duplicate(item):\n\n data = item.data\n case_number = data.get(\"case_number\")\n person_id = data.get(\"person_id\")\n\n table = item.table\n if case_number:\n query = (table.case_number == case_number) & \\\n (table.deleted != True)\n else:\n disease_id = data.get(\"disease_id\")\n if person_id and disease_id:\n query = (table.disease_id == disease_id) & \\\n (table.person_id == person_id) & \\\n (table.deleted != True)\n else:\n return\n\n duplicate = current.db(query).select(table.id,\n table.person_id,\n limitby=(0, 1)).first()\n if duplicate:\n item.data.person_id = duplicate.person_id\n item.id = duplicate.id\n item.method = item.METHOD.UPDATE",
"def check_id(self, id):",
"def test_model_can_create_a_person(self):\n old_count = People.objects.count()\n self.actor.save()\n new_count = People.objects.count()\n self.assertNotEqual(old_count, new_count)",
"def check_if_duplicate(self, data):\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND location = '{}'\\\n \".format(self.table, data['topic'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup with same topic at the same venue\\\n already exists'\n\n query = \"SELECT * FROM {} WHERE happening_on = '{}' AND location = '{}'\\\n \".format(self.table, data['happening_on'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date at the same venue \\\n already exists'\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND happening_on = '{}'\\\n \".format(self.table, data['topic'], data['happening_on'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date with same topic \\\n already exists'\n\n return False, None",
"def add_person(self, per: str):\n if per not in self._people:\n self._people.append(per)\n else:\n raise IDAlreadyExists",
"def test_duplicate_primary_key(self):\n view = SchemaView(SCHEMA)\n patcher = ObjectChanger(schemaview=view)\n dataset = Dataset()\n patcher.apply(AddObject(value=Person(id='P1', name='p1')), dataset)\n patcher.apply(AddObject(value=Person(id='P1', name='p2')), dataset)\n assert dataset.persons[0].id == 'P1'\n self.assertEqual(len(dataset.persons), 2)\n logging.info(dataset.persons[0])\n logging.info(dataset.persons[1])\n patcher.apply(RemoveObject(value=Person(id='P1')), dataset)\n self.assertEqual(len(dataset.persons), 1)",
"def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True",
"def testIPerson(self):\n # The id is obtained from the person object directly, uniqueness is enforced\n id = self.person.id\n self.failUnlessEqual(id, 'abc123', \"Person object returned incorrect id.\")",
"def is_person_identifier_used(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT personid FROM person WHERE personid =?\", (person_id,))\n person_identifier = \"\"\n is_used = True\n for row in c:\n person_identifier = row[\"personid\"]\n conn.close()\n if len(person_identifier) == 0:\n is_used = False\n if len(person_identifier) > 0:\n is_used = True\n return is_used\n except:\n return False",
"def petExist(animal, pet_id):\n return Animal.objects.filter(pk = pet_id).exists()",
"def registered(id):\n return True",
"def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)",
"def insert_match_id(match_id):\n conn = get_connect()\n cursor = conn.execute(\"SELECT * FROM match where matchId = ?\", [match_id])\n result_list = cursor.fetchall()\n if len(result_list) == 0:\n conn.execute(\"INSERT INTO match \\\n VALUES (?, 0)\", [match_id])\n print(\"matchId \" + str(match_id) + \" is inserted\")\n else:\n print(\"matchId \" + str(match_id) + \" already exists!\")\n conn.commit()\n conn.close()\n return",
"def check_and_add(email, name):\n\n key = ndb.Key(AddressEntry, email)\n model = key.get()\n # we only have a problem if a model for the given email exists AND the name is different\n if not model is None:\n if model.name != name:\n jdict = model.to_json_dict()\n jdict[\"requested_name\"] = name\n return False, jdict\n\n model = AddressEntry(\n id=email,\n email=email,\n name=name\n )\n model.put()\n return True, model.to_json_dict()",
"def add_person():\n # Find the last used PK\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT id FROM person ORDER BY id DESC;\")\n for row in cursor.fetchone():\n last_pk = row\n\n # Auto-increment the primary key for the person table.\n last_pk = last_pk + 1\n\n # Prompt the user for the rest of their information.\n first_name = input(\"Enter your first name: \")\n middle_name = input(\"Enter your middle name: \")\n last_name = input(\"Enter your last name: \")\n suffix_name = input(\"Enter your suffix: \")\n e_mail = input(\"Enter your email: \")\n # Default status of the person is active (1).\n status = 1\n\n # Store the input in a variable.\n person_data = (last_pk, first_name, middle_name, last_name, suffix_name,\n e_mail, status)\n\n # Connect and insert the data into the person table.\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO person VALUES(?, ?, ?, ?, ?, ?, ?);\",\n person_data)\n connection.commit()",
"def test_createperson(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p.id, p2.id)\n self.assertEqual(p.firstname, p2.firstname)\n self.assertEqual(p.lastname, p2.lastname)\n self.assertEqual(p.email, p2.email)\n self.assertEqual(p.hobbies, p2.hobbies)",
"def disease_duplicate(item):\n\n data = item.data\n code = data.get(\"code\")\n name = data.get(\"name\")\n\n table = item.table\n queries = []\n if code:\n queries.append((table.code == code))\n if name:\n queries.append((table.name == name))\n if queries:\n query = reduce(lambda x, y: x | y, queries)\n else:\n return\n\n rows = current.db(query).select(table.id,\n table.code,\n table.name)\n duplicate = None\n for row in rows:\n if code and row.code == code:\n duplicate = row.id\n break\n if name and row.name == name:\n duplicate = row.id\n if duplicate:\n item.id = duplicate\n item.method = item.METHOD.UPDATE",
"def test_member_already_exists(self):\n self.login_as(\"bob\")\n\n with self.assertNumQueries(6):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"You are already a member of this group.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)",
"def add_person(self, name, age):\n try:\n age = int(age)\n except Exception as e:\n _log.log(self.log, 'error', str(e).encode())\n return False\n\n person = _models.Person(name=name, age=age)\n\n # person key exists in cache, return false\n if _cache.person_exists(self.redis, self.people_key, person):\n return False\n\n # person key exists in db, add it to cache and return false\n if _orm.person_exists(self.session, _models.Person, _models.Person.name, name):\n try:\n _cache.add_people(self.redis, self.people_key, [person])\n except Exception as e:\n _log.log(self.log, 'warn', str(e).encode())\n \n return False\n\n # add person to db\n try:\n _orm.add_people(self.session, [person])\n _log.add_person(self.log, 'info', self.people_key, person, 'db')\n except Exception as e:\n _log.log(self.log, 'error', str(e).encode())\n return False\n\n # add person to cache\n try:\n _cache.add_people(self.redis, self.people_key, [person])\n _log.add_person(self.log, 'info', self.people_key, person, 'cache')\n except Exception as e:\n _log.log(self.log, 'warn', str(e).encode())\n\n return True",
"def check_duplicate(self, state):\n pass",
"def register(self, name):\n\n if name in self.players.itervalues():\n userPID = dict((self.players[k], k) for k in self.players)[name]\n self._logger.debug(\"Player already exists, giving ID\")\n return (True, {\"playerID\": userPID})\n else:\n newID = _getUniqueInt(self.players.keys())\n self.players[newID] = name\n TournamentSystem._logger.debug(\"Registered %s with playerID %d\",\n name, newID)\n return (True, {\"playerID\": newID})",
"def check_name_uniqueness(cls, user_id, name):\n data_with_same_name = Data.objects.only('id').filter(user_id=user_id, name = name)\n return len(data_with_same_name) == 0",
"def test_duplicate_user(self):\n json_resp = make_user(self.client)\n json_resp = make_user(self.client, username='Blah')\n # email should be taken\n self.assertEqual(json_resp['status'], 'email taken')\n # check only one user in the db\n self.assertEqual(User.query.count(), 1)\n # username should be taken\n json_resp = make_user(self.client, email='[email protected]')\n # check api response\n self.assertEqual(json_resp['status'], 'username taken')",
"def _check_for_preexisting_identifier(self, doi: Doi):\n # The database expects each field to be a list.\n query_criterias = {\"ids\": [doi.pds_identifier]}\n\n # Query database for rows with given id value.\n columns, rows = self._database_obj.select_latest_rows(query_criterias)\n\n for row in rows:\n existing_record = dict(zip(columns, row))\n\n if doi.doi != existing_record[\"doi\"]:\n raise IllegalDOIActionException(\n f\"There is already a DOI {existing_record['doi']} associated \"\n f\"with PDS identifier {doi.pds_identifier} \"\n f\"(status={existing_record['status']}).\\n\"\n f\"You cannot modify a DOI for an existing PDS identifier.\"\n )",
"def update():\n # checking if value already exists or not if not then add it to tabel.\n name = input(\"name you want to add : \")\n phone = int(input(\"PHone number to be added : \"))\n age = int(input(\" address to be added : \"))\n \n cur = mysql.connection.cursor()\n if cur.execute(f\"SELECT * FROM myinfo WHERE phone=2345678\"):\n return \"ALready exists\"\n else:\n cur.execute(\"INSERT INTO myinfo VALUES (%s, %s, %s)\", (name, phone, age))\n mysql.connection.commit()\n cur.close()\n return \"Inserted\"",
"def insert_match(self, gameid):\n if Match.query.filter(Match.gameid == gameid).first():\n self.logger.info(\"Match {} already exists in the DB\".format(gameid))\n return True\n match_json = self.rc.get_match(gameid)\n if not match_json:\n self.logger.warning(\"API did not return data for this gameid: {}\".format(gameid))\n return False\n match_json = self.lower_keys(match_json)\n # Get column names\n match_columns = Match.__table__.columns.keys()\n # Remove all k:v pairs that do not match column names\n to_del = []\n for k, v in match_json.items():\n if k not in match_columns:\n to_del.append(k)\n # del match_json[k]\n for k in to_del:\n del match_json[k]\n match = Match(**match_json)\n match.gamecreation = datetime.utcfromtimestamp(match.gamecreation // 1000)\n self.db.session.add(match)\n self.db.session.commit()\n return True",
"def test_id_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'id': 1, 'name': 'New Type', 'units': 'Kilo-Frobnicate',\n 'description': 'A new filter type.'})",
"def save_person(self, person_id, person_name, person_role):\n cursor = self.cur()\n cursor.execute('INSERT OR IGNORE INTO person (person_id, name, role) VALUES(?, ?, ?)',\n (person_id, person_name, person_role)\n )",
"def test_id_uniqueness(self):\n user_2 = User()\n self.assertNotEqual(self.user_1.id, user_2.id)",
"def insert_one(collection, data):\n try:\n return collection.insert_one(data).inserted_id == data['_id']\n except Exception as error:\n print(error)\n raise"
]
| [
"0.6410337",
"0.6204078",
"0.6095712",
"0.60917",
"0.6081425",
"0.60685897",
"0.60517544",
"0.6018271",
"0.5990265",
"0.59788346",
"0.59537405",
"0.5939683",
"0.58824253",
"0.58821607",
"0.5877335",
"0.58575386",
"0.584312",
"0.5832143",
"0.5791309",
"0.5790564",
"0.5783818",
"0.5774986",
"0.5768749",
"0.575805",
"0.5685359",
"0.5683722",
"0.5671418",
"0.5670894",
"0.5662897",
"0.5653184"
]
| 0.74958897 | 0 |
Function to determine if a topic with the same id as the new topic already exists in the database | def validate_new_topic(self, topic_id):
self.db_cursor.execute("""SELECT COUNT(*) FROM Topic WHERE id == %s""", (topic_id,))
ct = self.db_cursor.fetchone()
ct = ct[0]
if ct == 0:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_new_curriculum_topics(self, curriculum_topics):\n\n for cur in curriculum_topics:\n # check to make sure its in the general topics table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Topic WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"topic does not exist, we must create new one or cancel\") # todo\n\n return True",
"def _create_topic_if_not_exists(self, topic):\n if topic in self.environments['cluster'].kafka.consumer().topics():\n return True\n\n new_topic = NewTopic(name=topic, num_partitions=MAX_CONCURRENCY*2, replication_factor=1)\n admin_client = KafkaAdminClient(bootstrap_servers=self.environments['cluster'].kafka.brokers,\n request_timeout_ms=180000)\n admin_client.create_topics(new_topics=[new_topic], timeout_ms=180000)\n return False",
"def check_if_duplicate(self, data):\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND location = '{}'\\\n \".format(self.table, data['topic'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup with same topic at the same venue\\\n already exists'\n\n query = \"SELECT * FROM {} WHERE happening_on = '{}' AND location = '{}'\\\n \".format(self.table, data['happening_on'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date at the same venue \\\n already exists'\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND happening_on = '{}'\\\n \".format(self.table, data['topic'], data['happening_on'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date with same topic \\\n already exists'\n\n return False, None",
"def topic_exists(topic):\n client = AdminClient({\"bootstrap.servers\": \"PLAINTEXT://localhost:9092\"})\n topic_metadata = client.list_topics(timeout=5)\n return topic in set(t.topic for t in iter(topic_metadata.topics.values()))",
"def set_topic(self, new_topic, updating=False):\n\n TOPIC_QUERY = \"\"\"UPDATE Topic SET name = %s WHERE id = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Topic (name, id) VALUES (%s, %s)\"\"\"\n\n\n\n self.db_cursor.execute(TOPIC_QUERY, (new_topic.name, new_topic.id))\n self.db_connection.commit()",
"def is_valid_topic(index):\n return all_topics[index][1] == \"1\"",
"def get_or_create(cls, topic):\n\t\treturn cls.get_or_insert(FeedRecord.create_key_name(topic), topic=topic)",
"def _create_topic_if_not_exists(self, topic_name):\n creation_result = self.conn.create_topic(topic_name)\n return creation_result['CreateTopicResponse']['CreateTopicResult']['TopicArn']",
"def exists( identifier ):\n return note.exists(identifier)",
"def test_suggested_topic_success_create(self):\n owner = CustomUser.objects.get(id=301)\n created_suggested_topic = SuggestedTopics.create(owner=owner,\n name='name',\n description = 'description')\n\n self.assertIsInstance(created_suggested_topic, SuggestedTopics)",
"def if_already_present(video_id: str) -> bool:\n return Video.objects.filter(video_id=video_id).exists()",
"def topic_fully_replicated(self, topic):\n topic = _coerce_topic(topic)\n if topic not in self.topic_partitions:\n return False\n if not self.topic_partitions[topic]:\n # Don't consider an empty partition list 'fully replicated'\n return False\n return all(\n self.partition_fully_replicated(TopicAndPartition(topic, p))\n for p in self.topic_partitions[topic])",
"def is_article_duplicate(cls, article):\n return cls.db.hkeys(\"article_map\").count(article.link) != 0",
"def test_topic_tracker_needs_update(database, user, topic):\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n topicsread = TopicsRead.query.\\\n filter(TopicsRead.user_id == user.id,\n TopicsRead.topic_id == topic.id).first()\n\n with current_app.test_request_context():\n assert topic.tracker_needs_update(forumsread, topicsread)\n\n # Update the tracker\n topicsread = TopicsRead()\n topicsread.user_id = user.id\n topicsread.topic_id = topic.id\n topicsread.forum_id = topic.forum_id\n topicsread.last_read = datetime.utcnow()\n topicsread.save()\n\n forumsread = ForumsRead()\n forumsread.user_id = user.id\n forumsread.forum_id = topic.forum_id\n forumsread.last_read = datetime.utcnow()\n forumsread.save()\n\n # Now the topic should be read\n assert not topic.tracker_needs_update(forumsread, topicsread)\n\n post = Post(content=\"Test Content\")\n post.save(topic=topic, user=user)\n\n assert topic.tracker_needs_update(forumsread, topicsread)",
"def delete_addition_by_id(self,id):\r\n \r\n session = self.persistence.get_session() \r\n affected_rows = session.query(TopicAddition).filter(TopicAddition.id==id).delete()\r\n session.commit()\r\n\r\n if (affected_rows < 1): \r\n raise NoAffectedRows",
"def already_created(self, ticket_id, toggl_projects):\n project_prepends = [p['name'].split()[0][1:] for p in toggl_projects]\n if str(ticket_id) in project_prepends:\n return True\n return False",
"def apply(self, topic):\n return topic.__name__.charAt(0) == '/'",
"def topic_exists(self, topic_name):\n\n topic_info = self.kafka_client.list_topics(timeout=5)\n \n return topic_name in set(t.topic for t in iter(topic_info.topics.values()))",
"def check_exists(cls, topics):\n\t\tresult = []\n\t\tfor known_feed in cls.get([cls.create_key(url) for url in set(topics)]):\n\t\t\tif known_feed is not None:\n\t\t\t\tresult.append(known_feed.topic)\n\t\treturn result",
"def add_new_topic_to_db(self, topic_obj):\n # todo: don't need this anymore\n self.db_cursor.execute(\"\"\"INSERT INTO Topic (id, name) VALUES (%s, %s)\"\"\", (topic_obj.id, topic_obj.name))\n self.db_connection.commit()",
"def exists_and_newer(targetfile, topicfile):\n try:\n if getmtime(targetfile) >= getmtime(topicfile):\n return True\n else:\n return False\n except IOError:\n return False",
"def has_new_entry(self):\n if self.new_entry:\n self.new_entry -= 1\n return True",
"def test_topic_notification_create_maybe(self):\n user = utils.create_user()\n topic = utils.create_topic(self.category)\n comment = utils.create_comment(topic=topic)\n TopicNotification.create_maybe(user=user, comment=comment)\n notification = TopicNotification.objects.get(user=user, topic=topic)\n self.assertTrue(notification.is_active)\n self.assertTrue(notification.is_read)\n self.assertEqual(notification.action, COMMENT)\n\n # Creating it again should do nothing\n (TopicNotification.objects\n .filter(user=user, topic=topic)\n .update(is_active=False))\n TopicNotification.create_maybe(user=user, comment=comment)\n self.assertFalse(\n TopicNotification.objects.get(user=user, topic=topic).is_active)",
"def is_topic(cls, topic: str) -> bool:\n return re.match(AsrTrainSuccess.TOPIC_PATTERN, topic) is not None",
"def check_id(self, id):",
"def test_topic_save(forum, user):\n post = Post(content=\"Test Content\")\n topic = Topic(title=\"Test Title\")\n\n assert forum.last_post_id is None\n assert forum.post_count == 0\n assert forum.topic_count == 0\n\n topic.save(forum=forum, post=post, user=user)\n\n assert topic.title == \"Test Title\"\n\n topic.title = \"Test Edit Title\"\n topic.save()\n\n assert topic.title == \"Test Edit Title\"\n\n # The first post in the topic is also the last post\n assert topic.first_post_id == post.id\n assert topic.last_post_id == post.id\n\n assert forum.last_post_id == post.id\n assert forum.post_count == 1\n assert forum.topic_count == 1",
"def is_already_linked(ticket_id):\n exists_query = db.session.query(\n all_models.IssuetrackerIssue.issue_id\n ).filter_by(issue_id=ticket_id).exists()\n return db.session.query(exists_query).scalar()",
"def test_exists_true(self):\n self.assertTrue(Sample.exists(self.sample_id, self.sample_template))",
"def _check_if_duplicate_subject_identifier(self, using):\n if not self.pk and self.subject_identifier:\n if self.__class__.objects.using(using).filter(subject_identifier=self.subject_identifier):\n raise IdentifierError('Attempt to insert duplicate value for '\n 'subject_identifier {0} when saving {1} '\n 'on add.'.format(self.subject_identifier, self))\n else:\n if self.__class__.objects.using(using).filter(\n subject_identifier=self.subject_identifier).exclude(pk=self.pk):\n raise IdentifierError('Attempt to insert duplicate value for '\n 'subject_identifier {0} when saving {1} '\n 'on change.'.format(self.subject_identifier, self))\n self.check_for_duplicate_subject_identifier()",
"def add_topic(request, forum_id):\n\t\n\t\n\tif request.POST:\n\t\tpage_data = request.POST.copy()\n\t\tpage_data['topic_author'] = str(request.user)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['text'])\n\t\tfor i in tags:\n\t\t\tpage_data['text'] = page_data['text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.encodestring(i)+u'[/code]')\n\t\tpage_data['text'] = html2safehtml(page_data['text'] ,valid_tags=settings.VALID_TAGS)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['text'])\n\t\tfor i in tags:\n\t\t\tpage_data['text'] = page_data['text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.decodestring(i)+u'[/code]')\n\t\ttext = page_data['text']\n\t\tdel page_data['text']\n\t\tpage_data['topic_name'] = html2safehtml(page_data['topic_name'] ,valid_tags=())\n\t\tpage_data['topic_forum'] = forum_id\n\t\tpage_data['topic_posts'] = 1\n\t\tpage_data['topic_lastpost'] = str(request.user)+'<br />' + str(datetime.today())[:-10]\n\t\tpage_data['topic_last_pagination_page'] = 1\n\t\tpage_data['topic_modification_date'] = datetime.now()\n\t\tform = AddTopicForm(page_data)\n\t\tif form.is_valid():\n\t\t\tnew_place = form.save()\n\t\t\tpost = Post(post_topic = new_place, post_text = text, post_author = str(request.user), post_ip = request.META['REMOTE_ADDR'])\n\t\t\tpost.save()\n\t\t\tforum = Forum.objects.get(id=forum_id)\n\t\t\tforum.forum_topics = forum.forum_topics +1\n\t\t\tforum.forum_posts = forum.forum_posts +1\n\t\t\tforum.forum_lastpost = str(request.user)+' (' + str(datetime.today())[:-10] + ')<br /><a href=\"/forum/topic/1/' + str(new_place.id) + '/\">' + new_place.topic_name + '</a>'\n\t\t\tforum.save()\n\t\t\t\n\t\t\tmail_admins('Temat Dodany', \"Dodano Temat: http://www.\" + settings.SITE_KEY + \"/forum/forum/\" + forum_id +\"/\", fail_silently=True)\n\t\t\t\n\t\t\treturn HttpResponseRedirect(\"/forum/forum/\" + forum_id +\"/\")\n\t\telse:\n\t\t\treturn render_to_response(\n\t\t\t\t'myghtyboard/add_topic.html',\n\t\t\t\t{'form': form, 'perms': list_perms(request)},\n\t\t\t\tcontext_instance=RequestContext(request))\n\t\n\tform = AddTopicForm()\n\treturn render_to_response(\n\t\t'myghtyboard/add_topic.html',\n\t\t{'form': form, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))"
]
| [
"0.68671364",
"0.68062276",
"0.6378161",
"0.6126449",
"0.5916333",
"0.5914549",
"0.5872192",
"0.57884616",
"0.5726345",
"0.5713697",
"0.5695193",
"0.5675286",
"0.56705123",
"0.56675947",
"0.565931",
"0.5653919",
"0.5647924",
"0.5600276",
"0.5597816",
"0.55932456",
"0.5590022",
"0.5538513",
"0.55315334",
"0.55243874",
"0.5499884",
"0.5495938",
"0.5486258",
"0.54659516",
"0.5462138",
"0.5457767"
]
| 0.78769946 | 0 |
Function to add a brand new topic to the database | def add_new_topic_to_db(self, topic_obj):
# todo: don't need this anymore
self.db_cursor.execute("""INSERT INTO Topic (id, name) VALUES (%s, %s)""", (topic_obj.id, topic_obj.name))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_topic(self,text,addition,year,user):\r\n topic = Topic(date=date.today(),text=text,year=year,user=user)\r\n topic.addition = addition\r\n \r\n session = self.persistence.get_session()\r\n session.add(topic)\r\n session.commit()",
"def new_topic(request):\n form = Form(request, schema=TopicSchema)\n if form.validate():\n topic = form.data[\"title\"]\n author = form.data[\"author\"]\n desc = form.data[\"description\"]\n date = datetime.datetime.now()\n url = slugfy(topic)\n topic_tuple = {\n \"title\": topic,\n \"url\": url,\n \"author\": author,\n \"description\": desc,\n \"topic_date\": date.strftime(\"%d/%m/%Y\"),\n }\n request.db[\"topic\"].insert(topic_tuple)\n return HTTPFound(location=\"/\")\n\n return render_to_response(\n \"templates/new_topic.html\",\n {\"form\": FormRenderer(form), \"count\": count(request)},\n request=request,\n )",
"def set_topic(self, new_topic, updating=False):\n\n TOPIC_QUERY = \"\"\"UPDATE Topic SET name = %s WHERE id = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Topic (name, id) VALUES (%s, %s)\"\"\"\n\n\n\n self.db_cursor.execute(TOPIC_QUERY, (new_topic.name, new_topic.id))\n self.db_connection.commit()",
"def create_topic():\n nodes = Node.query.all()\n form = TopicForm(nodes)\n if request.method == 'POST':\n topic = Topic(title=request.form.get('title'),\n content=request.form.get('content'),\n node_id=request.form.get('node_id'),\n user=current_user._get_current_object())\n db.session.add(topic)\n db.session.commit()\n return jsonify({\"result\": 'ok'})\n\n return render_template('main/create_topic.html', nodes=nodes, form=form)",
"def add_topic(request, forum_id):\n\t\n\t\n\tif request.POST:\n\t\tpage_data = request.POST.copy()\n\t\tpage_data['topic_author'] = str(request.user)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['text'])\n\t\tfor i in tags:\n\t\t\tpage_data['text'] = page_data['text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.encodestring(i)+u'[/code]')\n\t\tpage_data['text'] = html2safehtml(page_data['text'] ,valid_tags=settings.VALID_TAGS)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['text'])\n\t\tfor i in tags:\n\t\t\tpage_data['text'] = page_data['text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.decodestring(i)+u'[/code]')\n\t\ttext = page_data['text']\n\t\tdel page_data['text']\n\t\tpage_data['topic_name'] = html2safehtml(page_data['topic_name'] ,valid_tags=())\n\t\tpage_data['topic_forum'] = forum_id\n\t\tpage_data['topic_posts'] = 1\n\t\tpage_data['topic_lastpost'] = str(request.user)+'<br />' + str(datetime.today())[:-10]\n\t\tpage_data['topic_last_pagination_page'] = 1\n\t\tpage_data['topic_modification_date'] = datetime.now()\n\t\tform = AddTopicForm(page_data)\n\t\tif form.is_valid():\n\t\t\tnew_place = form.save()\n\t\t\tpost = Post(post_topic = new_place, post_text = text, post_author = str(request.user), post_ip = request.META['REMOTE_ADDR'])\n\t\t\tpost.save()\n\t\t\tforum = Forum.objects.get(id=forum_id)\n\t\t\tforum.forum_topics = forum.forum_topics +1\n\t\t\tforum.forum_posts = forum.forum_posts +1\n\t\t\tforum.forum_lastpost = str(request.user)+' (' + str(datetime.today())[:-10] + ')<br /><a href=\"/forum/topic/1/' + str(new_place.id) + '/\">' + new_place.topic_name + '</a>'\n\t\t\tforum.save()\n\t\t\t\n\t\t\tmail_admins('Temat Dodany', \"Dodano Temat: http://www.\" + settings.SITE_KEY + \"/forum/forum/\" + forum_id +\"/\", fail_silently=True)\n\t\t\t\n\t\t\treturn HttpResponseRedirect(\"/forum/forum/\" + forum_id +\"/\")\n\t\telse:\n\t\t\treturn render_to_response(\n\t\t\t\t'myghtyboard/add_topic.html',\n\t\t\t\t{'form': form, 'perms': list_perms(request)},\n\t\t\t\tcontext_instance=RequestContext(request))\n\t\n\tform = AddTopicForm()\n\treturn render_to_response(\n\t\t'myghtyboard/add_topic.html',\n\t\t{'form': form, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))",
"def AddTopic(self, topic_obj):\n self.topics.append(topic_obj)",
"def create_topic(self, topic_name):\n self.topics[topic_name] = []",
"def create_topic(project_id, topic_id):\n topic_path = PUBLISHER_CLIENT.topic_path(project_id, topic_id)\n topic = PUBLISHER_CLIENT.create_topic(request={\"name\": topic_path})\n print(\"Created topic: {}\".format(topic.name))",
"def add_topic(request):\n template = loader.get_template('topicAdd.html')\n try:\n topic = serializers.serialize(\"json\", Topic.objects.filter())\n except ObjectDoesNotExist:\n return HttpResponse(\"This topic doesn't exists!\")\n context = {\n 'topics': topic\n }\n\n if request.method == \"POST\":\n data = JSONParser().parse(request)\n\n # Add topic to database.\n try:\n Topic.objects.get(name=data[\"name\"])\n print(\"topic exists\")\n return HttpResponse(\"This topic exists\")\n except ObjectDoesNotExist:\n try:\n user = User.objects.get(username=request.user)\n except ObjectDoesNotExist:\n return JsonResponse({'status':'false','message':'You should login to create a topic!'}, status=401)\n name = data[\"name\"]\n topicObject = Topic.objects.create(name=name, user=user)\n for tag in data[\"tags\"]:\n tag_name = tag['label']\n if tag_name == '':\n continue\n tag_wiki_id = tag['id']\n try:\n tagObject = Tag.objects.get(wikidataID=tag_wiki_id)\n except ObjectDoesNotExist:\n tagObject = Tag.objects.create(name=tag_name, wikidataID=tag_wiki_id)\n except MultipleObjectsReturned:\n return HttpResponse(\"Multiple tags exist for.\" + tag + \" Invalid State.\")\n\n #hidden tags\n unique_hidden_tags = list(set(tag['hidden_tags']))\n if unique_hidden_tags:\n tagObject.hidden_tags = unique_hidden_tags\n # for hidden_tag in unique_hidden_tags:\n # try:\n # hiddenTagObject = Tag.objects.get(wikidataID=hidden_tag)\n # except ObjectDoesNotExist:\n # hiddenTagObject = Tag.objects.create(wikidataID=hidden_tag, hidden=True)\n # hiddenTagObject.save()\n tagObject.save()\n topicObject.tags.add(tagObject)\n context = {\n }\n\n # Add relationship to database.\n relates_to = data[\"relates_to\"]\n for relation in data[\"relates_to\"]:\n if relation['topic_id'] == '':\n continue\n try:\n relatedTopicObject = Topic.objects.get(pk=relation['topic_id'])\n label = relation['rel_name']\n relationObject = Relation.objects.create(topic_from=topicObject, topic_to=relatedTopicObject, label=label)\n except ObjectDoesNotExist:\n print(\"error\")\n return HttpResponse(\"Related topic does not exist\");\n except MultipleObjectsReturned:\n print(\"error\")\n return HttpResponse(\"This topic exists\")\n # End of add relationship to database.\n\n\n # Adding a post to new created topic\n\n if data[\"postAdd\"] == True:\n postStuff = data[\"post\"]\n content = postStuff[\"post_content\"]\n postObject = Post.objects.create(content=content, user=user, topic=topicObject)\n for tag in postStuff[\"post_tags\"]:\n if len(tag)>0:\n if tag['label'] == '':\n continue\n try:\n tagObject = Tag.objects.get(wikidataID=tag['id'])\n except ObjectDoesNotExist:\n tagObject = Tag.objects.create(wikidataID=tag['id'], name=tag['label'])\n except MultipleObjectsReturned:\n return HttpResponse(\"Multiple tags exist for.\" + tag + \" Invalid State.\")\n\n unique_hidden_tags = list(set(tag['hidden_tags']))\n if unique_hidden_tags:\n tagObject.hidden_tags = unique_hidden_tags\n\n tagObject.save()\n postObject.tags.add(tagObject)\n # End of adding a post to new created topic\n\n return HttpResponse(template.render(context, request))\n return HttpResponse(template.render(context, request))",
"def insert_addition(self, text, user):\r\n added_at = date.today()\r\n addition = TopicAddition(date=added_at, text=text, user=user)\r\n \r\n session = self.persistence.get_session() \r\n session.add(addition)\r\n session.commit()",
"def set_course_topic(self, topic_id,course_name):\n self.db_cursor.execute(\n \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\",\n (course_name, topic_id))\n self.db_connection.commit()",
"def add_topic ( topics , stream = -1 ) :\n return Ostap.Utils.AddTopic ( topics , level , stream )",
"def create_topic (self):\n return self.tm.create_topic()",
"def post(self):\n s = ScuttlebuttService()\n try:\n topic_dict = simplejson.loads(self.request.body)\n topic = s.CreateTopic(topic_dict)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(topic.ToDict()))\n except simplejson.JSONDecodeError:\n # HTTP 400 for bad syntax.\n self.response.set_status(\n 400, 'Failed to create topic. Invalid JSON: %s' % self.request.body)\n except Exception, e:\n # HTTP 422 for syntactically correct but semantically wrong.\n self.response.set_status(422, 'Error creating topic: %s' % e)",
"def addTopic(self, topic):\n # Existing topics take priority\n if not self.helpTopics.containsKey(topic.__name__):\n self.helpTopics.put(topic.__name__, topic)",
"def set_topic(event_id, topic):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Event SET Topic='\"+topic+\"' WHERE eventID=\"+str(event_id)\n cursor.execute(sql_string)\n connection.commit()",
"def topic(self, topic):\n self.connection.topic(str(self), topic)",
"def topic(self, msg):\n self.make_topic(msg, new_topic=msg.args[0])\n self.bot.log.info(\"Topic changed by \" + msg.user)",
"def create(cls, topic):\n\t\treturn cls(key_name=utils.get_hash_key_name(topic), topic=topic)",
"def createTopic():\n data = request.json\n if \"agenda_id\" in data and \"section_position\" in data and \"topic_position\" in data and \"topic_json\" in data:\n if connectMongo.getAgendaById(data.get(\"agenda_id\")).found:\n responseWrapper = connectMongo.createNewTopic(data.get(\"agenda_id\"), data.get(\"section_position\"),\n data.get(\"topic_position\"),\n data.get(\"topic_json\"))\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n return jsonify(response=404, msg=\"Agenda not found\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")",
"def create_topic(self, topic):\r\n params = {'ContentType' : 'JSON',\r\n 'Name' : topic}\r\n response = self.make_request('CreateTopic', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def CreateTopic(self, TopicId, TopicStrings=None):\n if len(TopicStrings) >= 2:\n ticker, field = TopicStrings\n logging.info(f\"CreateTopic {TopicId}, {ticker}|{field}\")\n if not ticker:\n return None\n\n if ticker == \"set_token\":\n self.finnhub_token = field\n self.start_conn_event.set()\n\n new_topic = SimpeVarTopic(TopicId, TopicStrings)\n self.topics_by_key[(ticker)] = field\n self.updatedTopics[TopicId] = \"Finnhub token set\"\n else:\n new_topic = StockTickTopic(TopicId, TopicStrings)\n ticker = ticker.upper()\n self.topics_by_key[(ticker, field)] = new_topic\n subscribe_msg = f\"{{\\\"type\\\":\\\"subscribe\\\",\\\"symbol\\\":\\\"{ticker}\\\"}}\"\n logging.debug(subscribe_msg)\n try:\n self.async_loop.call_soon_threadsafe(lambda: self.send_message_queue.put_nowait(subscribe_msg))\n except Exception as e:\n logging.error(\"CreateTopic: {}\".format(repr(e)))\n else:\n logging.error(f\"Unknown param: CreateTopic {TopicId}, {TopicStrings}\")\n return None\n return new_topic",
"def _create_topic(self):\n topic_name = self.generate_name()\n try:\n topic = self.sns.create_topic(Name=topic_name)\n except Exception as e:\n raise RuntimeError('SNS could create topic: %s' % e)\n self.topic_name, self.topic = topic_name, topic",
"def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)",
"def perform_create(self, serializer):\n topic = models.ProfileTopic.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(topic=topic)",
"def test_create_topic_viewset(self):\n\n data = {\n 'title': 'Test Topic',\n 'description': 'Test topic description',\n 'body': 'Test topic body',\n 'section': 'CONVERSATION',\n 'tags': 'test'\n }\n response = self.client.post(reverse('api:topics-list'), data)\n self.assertTrue(response.status_code == status.HTTP_201_CREATED)\n created_topic = Topic.objects.last()\n self.assertTrue(created_topic)\n self.assertEqual(created_topic.title, data['title'])\n self.assertEqual(created_topic.description, data['description'])\n self.assertEqual(created_topic.body, data['body'])\n self.assertEqual(created_topic.section, data['section'])",
"def edit_topic():\n topic = db.topic(request.args(0))\n form = SQLFORM(db.topic, record=topic)\n form.vars.description = text_store_read(topic.description)\n if form.validate():\n topic.update_record(\n name=form.vars.name,\n )\n text_store_write(form.vars.description, key=topic.description)\n session.flash = T('The topic has been created')\n redirect(URL('default', 'index'))\n return dict(form=form)",
"def test_suggested_topic_success_create(self):\n owner = CustomUser.objects.get(id=301)\n created_suggested_topic = SuggestedTopics.create(owner=owner,\n name='name',\n description = 'description')\n\n self.assertIsInstance(created_suggested_topic, SuggestedTopics)",
"def sns_create_topic(session, topic):\n if session is None:\n return None\n\n client = session.client(\"sns\")\n response = client.create_topic(Name=topic)\n print(response)\n if response is None:\n return None\n else:\n return response['TopicArn']",
"def topic_posting_new(self, request):\n col = ObjectPostings(self.cdb_object_id, {})\n return posting_new(col, request)"
]
| [
"0.77887875",
"0.74170333",
"0.72958183",
"0.72625315",
"0.6993564",
"0.6992848",
"0.69752526",
"0.6846889",
"0.68312746",
"0.68089825",
"0.6718152",
"0.666205",
"0.66021574",
"0.6561286",
"0.6547",
"0.6530548",
"0.6456005",
"0.6446164",
"0.64442694",
"0.64338785",
"0.6417663",
"0.63528925",
"0.632554",
"0.63019276",
"0.6287331",
"0.62834007",
"0.62788254",
"0.62461466",
"0.61840737",
"0.61820436"
]
| 0.7786357 | 1 |
Function to retrieve a topic from the database to store in the client model | def get_topic(self, id):
TOPIC = """SELECT COUNT(*) FROM Topic WHERE id = %s"""
ret = None
try:
self.db_cursor.execute("""SELECT name, id FROM Topic WHERE id = %s""", (id,))
t = self.db_cursor.fetchall()
ret = Topic()
ret.name = t[0][0]
ret.id = id
except:
logging.warning("DBAdapter: Error- cannot retrieve topic with id " + str(id))
return None
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(cls, topic_info):\n try: #to treat topic info as topic.id\n return Topic.query.get(int(topic_info))\n except Exception: #treat topic info as topic.name\n return Topic.query.filter_by(name=topic_info).first()",
"def get_topic(topic_id):\n topic = db_session.query(Topic).filter_by(id=topic_id).one()\n return jsonify(topic.serialize)",
"def get_by_topic(cls, topic):\n\t\treturn cls.get_by_key_name(get_hash_key_name(topic))",
"def GetTopic(self, topic_id):\n return self._TopicSearchHelper(self.topics, topic_id)",
"def get_topic(self,topic_path):\n twiki_cgi = \"{:s}/bin/view/{:s}\".format(self.url,topic_path)\n\n params = {'username': self.settings['auth']['username'],\n 'password': self.settings['auth']['password'],\n 'raw': 'text'}\n response = self.session.get(twiki_cgi, params=params)\n\n return response",
"def get_topic(title):\n return Topic.get(Topic.title == title)",
"def topic(self, topic_id):\r\n return contents.Topic(self, topic_id)",
"def topic(self, topic_id):\r\n return topics.Topic(self, topic_id)",
"def get_topic(self):\n return self.topic",
"def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)",
"def get_topic_of_question(question):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n topic_id = question.get(\"TopicId\")\n # query topic_id of the question\n try:\n response = topic_table.get_item(Key={\"TopicId\": topic_id})\n topic = response[\"Item\"]\n except:\n print(\"No topic found, returning None..\")\n return None\n return topic",
"def get_class_topic(class_id):\n topic_data = query_db(\"SELECT id, name FROM topics WHERE class_id=?\", [class_id])\n topics = []\n for topic in topic_data:\n topic_dict_class = {}\n topic_dict_class[\"id\"] = topic[0]\n topic_dict_class[\"name\"] = topic[1]\n topics.append(topic_dict_class)\n return topics",
"def topic(self, topic):\n self.connection.topic(str(self), topic)",
"def topic(request, topic_id):\n\ttopic = Topic.objects.get(id=topic_id)\n\tvocabs = topic.vocab_set.all()\n\tcontext = {'topic': topic, 'vocabs':vocabs}\n\treturn render(request, 'Toeic/topic.html', context)",
"def topic(self):\n return self.config.get('topic', f'{NAMESPACE}/{self.id}')",
"def test_retrieve_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n response = self.client.get(reverse('api:topics-detail', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('title'), topic.title)",
"def topic(self, topic_id):\n return topics.Topic(self, topic_id)",
"def get_topics():\n topics, _ = base_query(db_session)\n return jsonify([p.serialize for p in topics])",
"def _get_generic_topic(self):\n\n content_type = ContentType.objects.get_for_model(self.__class__)\n app_label = content_type.app_label\n\n return super(ProducerModel, self)._get_generic_topic(identifier=app_label)",
"def _get_topic_for_response():\n return _get_topic_base() + \"res/\"",
"def test_single_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topic/Topic 1/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['name'],'Topic 1')\n self.assertEqual(data['description'],'The first topic.')",
"def topicnews(topic):\n urlnews=urltop\n url=urlnews+topic\n urlapi=url+'&'+'apiKey='\n urlcoun=urlapi+apikey\n response=requests.get(urlcoun)\n data=response.json()\n return data",
"def topic(self):\n return self._topic_name",
"def __init__(self, topic):\n self.topic = topic",
"def test_topicsread(topic, user):\n topicsread = TopicsRead()\n topicsread.user_id = user.id\n topicsread.topic_id = topic.id\n topicsread.forum_id = topic.forum_id\n topicsread.last_read = datetime.utcnow()\n topicsread.save()\n assert topicsread is not None\n\n topicsread.delete()\n topicsread = TopicsRead.query.\\\n filter_by(topic_id=topicsread.topic_id).\\\n first()\n assert topicsread is None",
"def topic(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"topic\")",
"def get_course_topic(self, topic_id, course_name):\n ret = None\n try:\n self.db_cursor.execute(\n \"\"\"SELECT course_name, topic_id FROM CourseTopics WHERE course_name = %s AND topic_id = %s\"\"\",\n (course_name, topic_id))\n ct = self.db_cursor.fetchall()\n if ct:\n cname = ct[0][0]\n ctopic = ct[0][1]\n ret = [cname, ctopic]\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve course topic: \" + str(id))\n\n return ret",
"def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions",
"def topic(self) -> str:\n return self._topic",
"def describe_topic(self, index):\n assert(self.has_topics)\n assert(0 <= index < self.K)\n return self.topics[index]"
]
| [
"0.7341961",
"0.719212",
"0.7157874",
"0.69891924",
"0.69395",
"0.68769634",
"0.6867653",
"0.67888474",
"0.6777278",
"0.665143",
"0.6563236",
"0.65536606",
"0.6545432",
"0.6540096",
"0.64925075",
"0.645844",
"0.644433",
"0.6385114",
"0.6378959",
"0.6343912",
"0.63417065",
"0.6313446",
"0.6287946",
"0.61786264",
"0.617683",
"0.61241",
"0.61204374",
"0.61181325",
"0.60503244",
"0.6025973"
]
| 0.71923053 | 1 |
Function to retrieve a course from the database | def get_course(self, name):
GET_TOPIC_IDS = """SELECT topic_id FROM CourseTopics WHERE course_name = %s"""
GET_GOAL_IDS = """SELECT goal_id FROM CourseGoals WHERE course_name = %s"""
ret = None
try:
self.db_cursor.execute("""SELECT subject_code, credit_hours, description FROM Course WHERE name = %s""", (name,))
c = self.db_cursor.fetchall()
ret = Course()
ret.subject_code = c[0][0]
ret.credit_hours = c[0][1]
ret.description = c[0][2]
ret.name = name
ret.goals = []
ret.topics = []
self.db_cursor.execute(GET_TOPIC_IDS, (name,))
t_ids = self.db_cursor.fetchall()
for id in t_ids:
ret.topics.append(id[0])
self.db_cursor.execute(GET_GOAL_IDS, (name,))
g_ids = self.db_cursor.fetchall()
for id in g_ids:
ret.goals.append(id[0])
except:
logging.warning("DBAdapter: Error- cannot retrieve course: " + str(name))
return None
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_course(self):\n db = Course._file.read_db()\n courses = db[\"courses\"]\n for crs in courses:\n if crs[\"course_name\"] == self._course_name:\n return Course(**crs)\n break",
"def select_course(self, subject, course_num):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT * FROM courses WHERE (subject=? AND course_num=?)\",\n (subject, course_num),\n )\n return cursor.fetchone()",
"def get_courses(db: Session = Depends(get_db)): # , _: models.User = Depends(get_current_user))\n return crud.course.get_multi(db, skip=0, limit=100)",
"def get_course(course_id: int, db: Session = Depends(get_db)):\n course = crud.course.get(db, obj_id=course_id)\n\n if not course:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Course {course_id} not found')\n\n return schemas.CourseResponseWithUsers(\n name=course.name,\n level=course.level,\n description=course.description,\n background_color=course.background_color,\n activities=course.activities,\n users=course.users.all()\n )",
"def select_course_detail(self, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_id = ? AND cs.course_section_id = ?\"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()",
"def get_course_by_id(course_id):\n course = Courses.query. \\\n filter_by(id=course_id). \\\n first_or_404()\n\n return course",
"def getCourseData(self, course):\n\t\tif course == None:\n\t\t\treturn None\n\t\tcommand = \"SELECT name, description, author_id FROM courses WHERE id=?;\"\n\t\tparams = (course,)\n\n\t\tdata = self._run_command(command, params)\n\n\t\tif not data:\n\t\t\treturn None\n\n\t\tdata = data[0]\n\t\tresult = {\"name\": data[0] if data[0] else \"\", \n\t\t\"description\": data[1] if data[1] else \"\", \n\t\t\"author_id\": data[2]}\n\n\t\treturn result",
"def get_course(self, _):\r\n courses = self.modulestore.get_courses()\r\n return courses[0]",
"def get_course(self, id):\n id = str(id)\n for i in range(len(self.courses)):\n if self.courses[i].id == id:\n return self.courses[i]",
"def get_course(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('courses')]\n course_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n course_fields[key] = value\n additional_fields = ['group_key'] # Additional fields that could be passed in args\n group_fields = {}\n for key, value in fields.items():\n if key in additional_fields:\n group_fields[key] = value\n group = None if len(group_fields) == 0 else self.get_group(**group_fields)\n if group is not None:\n courses = [i for i in Courses.select().where(Courses.id == group.course).filter(**course_fields)]\n else:\n courses = [i for i in Courses.select().filter(**course_fields)]\n # Expect single value if search by group or unique fields, list if by non-unique\n return courses if len(courses) > 1 else courses[0] if len(courses) == 1 else None",
"def get_course(course_id, depth=0):\r\n course = modulestore().get_course(course_id, depth=depth)\r\n if course is None:\r\n raise ValueError(u\"Course not found: {0}\".format(course_id))\r\n return course",
"def get_course(self, course_id: str) -> Course:\n course = self.collection.find_one({\"id\": course_id})\n if not course:\n raise NotFound(f\"Course with id {course_id} was not found\")\n return Course.parse_obj(course)",
"def get_course(self):\n bib = self.get_bib()\n obj = race()\n course = find(obj.courses, name=str(bib))\n if course:\n return course\n\n # get course via group\n person = self.get_person()\n if person and isinstance(person, Person):\n if person.group:\n return person.group.course\n\n return None",
"def get_course_by_id(course_key, depth=0):\r\n course = modulestore().get_course(course_key, depth=depth)\r\n if course:\r\n return course\r\n else:\r\n raise Http404(\"Course not found.\")",
"def course_from_id(course_id):\r\n return modulestore().get_course(course_id)",
"def get_course(self, name):\r\n print \"Importing {0}\".format(name)\r\n\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])\r\n courses = modulestore.get_courses()\r\n self.modulestore = modulestore\r\n self.assertEquals(len(courses), 1)\r\n return courses[0]",
"def retrieve_courses(self) -> pd.DataFrame:\n if self.courses_df is None:\n self.courses_df = pd.read_sql_query('SELECT * FROM courses', con=self.connection())\n\n return self.courses_df",
"def parse_get_course(xml_course):\n parse_course = parse_create_course(xml_course)\n query_constraints = {\n \"termCode\": parse_course[\"termCode\"],\n \"subject\": parse_course[\"subject\"],\n \"courseNumber\": parse_course[\"courseNumber\"]\n }\n params = urllib.urlencode({\"where\": json.dumps(query_constraints)})\n connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n connection.connect()\n connection.request(\n \"GET\",\n \"%s?%s\" % (COURSES_ENDPOINT, params),\n '',\n {\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n response = json.loads(connection.getresponse().read())\n if response.get(\"results\"):\n return response[\"results\"][0]\n else:\n return None",
"def get_course(self):\r\n return self.descriptor.runtime.modulestore.get_course(self.course_id)",
"def get_course(dept, num):\n \n # semester: 10 = Fall, 20 = Spring, 30 = Summer\n host = \"https://selfservice.mypurdue.purdue.edu/prod/bwckctlg.p_disp_course_detail\"\n query = \"?cat_term_in={term}&subj_code_in={dept}&crse_numb_in={num}\".format(term=\"201620\", dept=dept, num=num)\n urlfetch.set_default_fetch_deadline(600)\n result = urlfetch.fetch(host+query)\n \n if result.status_code == 200:\n tree = html.fromstring(result.content)\n text = tree[1][4][2].text_content() # get just the relevant text of the webpage \n\n # remove unicode non-breaking spaces to allow regexing\n text = text.replace(u'\\xa0',u' ')\n return text",
"def select_course_detail_by_title(self, title):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_title LIKE ?;\n \"\"\",\n (title,),\n )\n return cursor.fetchall()",
"def clean_course(self):\n course_id = self.cleaned_data[self.Fields.COURSE].strip()\n if not course_id:\n return None\n try:\n client = EnrollmentApiClient()\n return client.get_course_details(course_id)\n except (HttpClientError, HttpServerError):\n raise ValidationError(ValidationMessages.INVALID_COURSE_ID.format(course_id=course_id))",
"def GetCourses(firebase: firebase) -> None:\n\n global courses\n obj_key_list = []\n\n result = firebase.get('/course', None)\n\n if result is None:\n return\n\n for i in result.keys():\n obj_key_list.append(i)\n\n for i in obj_key_list:\n course = Course()\n course.setId(i)\n course.setKnowledgeAreaId(result[i]['knowledgeareaid'])\n course.setCatalogId(result[i]['catalogid'])\n course.setTitle(result[i]['name'])\n course.setDescription(result[i]['description'])\n course.setInstructor(result[i]['instructor'])\n course.setFee(result[i]['fee'])\n courses.append(course)",
"def get_courses(self):\n\n self.search([]).unlink()\n token = self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_courses',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n print(request)\n\n for req in request:\n try:\n if req['id']==1:\n pass\n else:\n self.create({\n 'course_id': req['id'], \n 'category':req['categoryid'],\n 'fullname':req['fullname'], \n 'shortname':req['shortname'],\n 'summary': req['summary']\n }\n )\n except Exception:\n print('Course not created')",
"def get_course(self, course_key, depth=None):\r\n assert(isinstance(course_key, CourseKey))\r\n store = self._get_modulestore_for_courseid(course_key)\r\n try:\r\n return store.get_course(course_key, depth=depth)\r\n except ItemNotFoundError:\r\n return None",
"def get_by_name(self, course_name):\n course = Course.query.filter_by(name=course_name).first()\n\n return course",
"def get_course(self, course_key, depth=None):\r\n assert(isinstance(course_key, SlashSeparatedCourseKey))\r\n location = course_key.make_usage_key('course', course_key.run)\r\n try:\r\n return self.get_item(location, depth=depth)\r\n except ItemNotFoundError:\r\n return None",
"def course(institution, course, mode):\n\n try:\n logging.info(f\"Process a request for an course resource\\nurl: {request.url}\")\n\n params = dict({\"institution_id\": institution, \"course_id\": course, \"mode\": mode})\n logging.info(f\"Parameters: {params}\")\n\n #\n # The params are used in DB queries, so let's do\n # some basic sanitisation of them.\n #\n if not valid_course_params(params):\n logging.error(f\"valid_course_params returned false for {params}\")\n return Response(\n get_http_error_response_json(\n \"Bad Request\", \"Parameter Error\", \"Invalid parameter passed\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=400,\n )\n\n logging.info(\"The parameters look good\")\n \n courses_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_courses_collection_id)\n dataset_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_dataset_collection_id)\n\n # Intialise a CourseFetcher\n course_fetcher = CourseFetcher(client, courses_collection_link)\n\n # Initialise dataset helper - used for retrieving latest dataset version\n dsh = DataSetHelper(client, dataset_collection_link)\n version = dsh.get_highest_successful_version_number()\n\n # Get the course\n course = course_fetcher.get_course(version=version, **params)\n\n if course:\n return Response(\n course, headers={\"Content-Type\": \"application/json\"},\n status=200\n )\n else:\n return Response(\n get_http_error_response_json(\n \"Not Found\", \"course\", \"Course was not found.\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=404,\n )\n\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # Raise so Azure sends back the HTTP 500\n raise e",
"def getUserCourse(self, chat_id):\n\t\tcommand = \"SELECT cur_course FROM users WHERE chat_id=?;\"\n\t\tparams = (chat_id,)\n\n\t\tdata = self._run_command(command, params)\n\n\t\treturn data[0][0]",
"def detail_course(request, pk, template=\"core/detail_course.html\"):\n response = {\n 'course': get_object_or_404(Course, pk=pk)\n }\n return direct_to_template(request, template, response)"
]
| [
"0.77193254",
"0.7518302",
"0.7269233",
"0.72150934",
"0.7173703",
"0.7170874",
"0.7087158",
"0.70306355",
"0.6874583",
"0.6817401",
"0.6806821",
"0.67756855",
"0.66746914",
"0.66484475",
"0.66312397",
"0.6618067",
"0.66042495",
"0.65964794",
"0.65766454",
"0.6574305",
"0.6556138",
"0.6522771",
"0.65005785",
"0.6485456",
"0.64147824",
"0.6390107",
"0.6357984",
"0.63531417",
"0.63265514",
"0.63258827"
]
| 0.76162946 | 1 |
Function to set the course in the db | def set_course(self, new_course, updating=False):
COURSE_QUERY = """UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s""" if updating \
else """INSERT INTO Course (subject_code, credit_hours, description, name) VALUES (%s, %s, %s, %s)"""
self.db_cursor.execute(COURSE_QUERY, (new_course.subject_code, new_course.credit_hours, new_course.description, new_course.name))
self.db_connection.commit()
# Add course topics and course goals:
for ct_id in new_course.topics:
self.set_course_topic(ct_id, new_course.name)
for cg_id in new_course.goals:
self.set_course_goal(cg_id, new_course.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)",
"def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()",
"def setUserCourse(self, chat_id, course):\n\t\tcommand = \"UPDATE users SET cur_course=? WHERE chat_id=?;\"\n\t\tparams = (course, chat_id,)\n\n\t\tself._run_command(command, params)",
"def course(self, value: int):\n self._course = value",
"def save_course(self, course: Course) -> None:\n self.collection.insert_one(course.dict())",
"def update_course(self):\n # ensure that updating course is exists\n if self.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n\n # ensuring that user does not provided less number of limited places\n if db[\"courses\"][crs_i][\"total_place\"] > self._total_place:\n print(\"{} course's limited places number must be more than {}\".format(\n self._course_name,\n db[\"courses\"][crs_i][\"total_place\"]\n ))\n return\n\n db[\"courses\"][crs_i][\"teacher\"] = self._teacher\n db[\"courses\"][crs_i][\"total_place\"] = self._total_place\n break\n self._file.write_db(db)\n print(\"The course - {} is updated\".format(self._course_name))\n return self.get_course().course_info()",
"def test_update_entry_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def set_course_topic(self, topic_id,course_name):\n self.db_cursor.execute(\n \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\",\n (course_name, topic_id))\n self.db_connection.commit()",
"def set_course_id(self, course_id):\n self.ID = course_id",
"def save_course(self):\n\t\tprint(\"Course sauvegardee\")\n\t\tprint(self.Course)\n\n\t\tprint(\"self.var_nom : \"+self.var_nom.get())\n\t\tself.Course.name=self.var_nom.get()\n\t\tprint(\"self.vqr_ete : \"+str(self.var_ete.get()))\n\t\tif(self.var_ete.get()==1):\n\t\t\tself.Course.season = \"Seulement ete\"\n\t\telif(self.var_hiver.get()==1):\n\t\t\tself.Course.season = \"Seulement hiver\"\n\t\telse:\n\t\t\tself.Course.season = \"Toutes\"\n\n\n\t\tif self.var_OK_invites.get() == 1:\n\t\t\tself.Course.OK_for_invitee = True\n\n\t\tif self.var_preparer_la_veille.get() == 1:\n\t\t\tself.Course.prepare_day1 = True\n\n\t\tif self.var_legume.get() == 1:\n\t\t\tself.Course.type_course = \"Legume\"\n\t\telif self.var_viande.get() == 1:\n\t\t\tself.Course.type_course = \"Viande\"\n\t\telif self.var_poisson.get() == 1:\n\t\t\tself.Course.type_course = \"Poisson\"\n\t\telif self.var_puree.get() == 1:\n\t\t\tself.Course.type_course = \"Puree\"\n\t\telif self.var_soupe.get() == 1:\n\t\t\tself.Course.type_course = \"Soupe\"\n\t\telif self.var_salade.get() == 1:\n\t\t\tself.Course.type_course = \"Salade\"\n\t\telif self.var_autre .get() == 1:\n\t\t\tself.Course.type_course = \"Autres\"\n\t\telse:\t\n\t\t\tself.Course.type_course = \"Autres\"\n\t\t\n\n\t\tself.Course.recipe = self.text_recipe.get(\"1.0\",END)\n\t\tself.Course.link = self.text_link.get(\"1.0\",END)\n\t\tprint(self.Course)\n\t\t\n\t\tself.getListOfRecette()\n\t\tself.list_course.append(self.Course)\n\t\tself.saveListOfRecette()\n\t\t#on quitte la fenetreTopLevel\t\n\t\tself.parentFrame.destroy()",
"def refresh_course(self):\r\n self.course = modulestore().get_course(self.course.id)",
"def reload_course(self):\r\n self.course = self.store.get_course(self.course.id)",
"def set_curriculum_course(self, curriculum_name, course_name, required, updating=True):\n CURRICULUM_COURSE_QUERY = \"\"\"UPDATE CurriculumListings SET required = %s WHERE curriculum_name = %s AND course_name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO CurriculumListings (curriculum_name, course_name, required) VALUES (%s, %s, %s)\"\"\"\n\n if not updating:\n self.db_cursor.execute(\n CURRICULUM_COURSE_QUERY,\n (curriculum_name, course_name, required))\n else:\n self.db_cursor.execute(\n CURRICULUM_COURSE_QUERY,\n (required, curriculum_name, course_name))\n self.db_connection.commit()",
"def set_up_course(self):\r\n course = CourseFactory(start=datetime(2013, 9, 16, 7, 17, 28))\r\n course = modulestore().get_course(course.id) # pylint: disable=no-member\r\n return course",
"def update(request):\n\tcourse_id = request.GET.get('course_id')\n\tif request.method == 'POST':\n\t\tcourse_title = request.POST['course_title']\n\t\tinstitute_name = request.POST['institute_name']\n\t\tcourse_desc = request.POST['course_desc']\n\t\tcurrent_data = Course.objects.get(course_id = course_id)\n\t\tcurrent_data.course_title = course_title\n\t\tcurrent_data.institute_name = institute_name\n\t\tcurrent_data.course_desc = course_desc\n\t\tcurrent_data.save()\n\t\treturn HttpResponseRedirect(reverse('courseapp:index'))\n\tdata = Course.objects.get(course_id = course_id)\n\treturn render(request,'update.html',{'data':data})",
"def _configure_course(self):\r\n url = STUDIO_BASE_URL + '/settings/details/' + self._course_key\r\n\r\n # First, get the current values\r\n response = self.session.get(url, headers=self.headers)\r\n\r\n if not response.ok:\r\n raise CourseFixtureError(\r\n \"Could not retrieve course details. Status was {0}\".format(\r\n response.status_code))\r\n\r\n try:\r\n details = response.json()\r\n except ValueError:\r\n raise CourseFixtureError(\r\n \"Could not decode course details as JSON: '{0}'\".format(details)\r\n )\r\n\r\n # Update the old details with our overrides\r\n details.update(self._course_details)\r\n\r\n # POST the updated details to Studio\r\n response = self.session.post(\r\n url, data=self._encode_post_dict(details),\r\n headers=self.headers,\r\n )\r\n\r\n if not response.ok:\r\n raise CourseFixtureError(\r\n \"Could not update course details to '{0}' with {1}: Status was {2}.\".format(\r\n self._course_details, url, response.status_code))",
"def update_course(course):\r\n store = editable_modulestore()\r\n store.update_item(course, '**replace_user**')\r\n updated_course = store.get_course(course.id)\r\n return updated_course",
"def set_course_goal(self, goal_id, course_name):\n self.db_cursor.execute(\n \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\",\n (course_name, goal_id))\n self.db_connection.commit()",
"def register_course(self, **fields):\n if 'course_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('courses')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n check = Courses.get_or_none(course_key=needed_fields['course_key'])\n if check is not None:\n return check\n new_course = Courses.get_or_create(**needed_fields)\n return new_course",
"def _update(self, course_name: str, newdata: ParseType) -> None:\n\n self.courses[course_name] = newdata",
"def test_update_topic_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def put_child(course_id, section_id):\n\n section_url = SECTIONS_ENDPOINT + \"/\" + section_id\n course_url = COURSES_ENDPOINT + \"/\" + course_id\n\n # Get course from Parse\n course_connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n course_connection.connect()\n course_connection.request(\n method='GET',\n url=course_url,\n headers={\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n course = json.loads(course_connection.getresponse().read())\n # Add Section id to Course's list\n if course and (section_id not in course[\"sections\"]):\n course[\"sections\"].append(section_id)\n\n # Persist new course\n course_connection.request(\n method='PUT',\n url=course_url,\n body=json.dumps(course),\n headers={\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )",
"def partial_update_course(course_id: int, course_request: schemas.CourseRequestPartial, db: Session = Depends(get_db)):\n\n course = crud.course.get(db, obj_id=course_id)\n\n if not course:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Course {course_id} not found')\n\n try:\n return crud.course.update(db, db_obj=course, obj_in=course_request)\n except Exception as error:\n logger.error(f'{error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')",
"def create_course(request, username):\n\n # git data from post form\n if request.method == \"POST\":\n\n\n # if user log in \n try:\n instructor = User.objects.get(username=username)\n if ensure_login(instructor) == False:\n return JsonResponse({'login': 'User must login'}, status=403) \n except:\n return JsonResponse({'login': 'User must login'}, status=403)\n\n # check if an instructor\n if not instructor.is_staff:\n return JsonResponse({'error': 'Not permitted to create course'}, status=403)\n\n course_code = request.POST['code']\n course_name = request.POST['name']\n level = request.POST['level']\n\n print(course_code, course_name, level)\n\n # validate fileds\n errors = {}\n\n # check if course code is exists \n if (value_is_exists(\"course_code\",course_code,Course)):\n return JsonResponse({\"coursecode\":\"course code already exist\"})\n\n # check if course name is valid \n if course_name.replace(\" \",\"\").isalpha() == False:\n return JsonResponse({\"coursename\" : \"course name is not valid\"})\n\n # validate level \n try:\n if int(level) > 7:\n errors.update({\"levellen\" : \"level must be less than 7\"})\n except:\n errors.update({\"levelint\" : \"level must be integer\"})\n\n # check if not found errors \n if len(errors) != 0:\n return JsonResponse({'errors': errors}, status=400)\n \n\n # add data to database \n course = Course(created_by_instructor=instructor ,course_code = course_code, course_name = course_name, level = level)\n \n # commit change \n course.save()\n\n print('course created successfully')\n\n # if no errors return success \n return JsonResponse({'success': True}, status=200)\n else:\n return JsonResponse({'error': 'Method not allowed'}, status=405)",
"def setUp(self):\n BaseUser.setUp(self)\n self.login()\n new_course = dict(course=\"maths\")\n test_post_request(self, \"/course/new\", new_course, models.Course, 1)",
"def set_up_course(self, **course_kwargs):\r\n course = CourseFactory(**course_kwargs)\r\n chapter = ItemFactory(category='chapter', parent_location=course.location) # pylint: disable=no-member\r\n section = ItemFactory(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))\r\n vertical = ItemFactory(category='vertical', parent_location=section.location)\r\n ItemFactory(category='problem', parent_location=vertical.location)\r\n\r\n course = modulestore().get_course(course.id) # pylint: disable=no-member\r\n self.assertIsNotNone(course.get_children()[0].get_children()[0].due)\r\n return course",
"def update_db():\n \n with open(\"courses_2016.json\") as data:\n data = data.read()\n\n courses = json.loads(data)\n\n for course in courses:\n try:\n [dept, course] = course.split(\" \")\n text = get_course(dept, course)\n insert_course(dept, course, text)\n except:\n failures.append(course)",
"def _create_course(self):\r\n # If the course already exists, this will respond\r\n # with a 200 and an error message, which we ignore.\r\n response = self.session.post(\r\n STUDIO_BASE_URL + '/course/',\r\n data=self._encode_post_dict(self._course_dict),\r\n headers=self.headers\r\n )\r\n\r\n try:\r\n err = response.json().get('ErrMsg')\r\n\r\n except ValueError:\r\n raise CourseFixtureError(\r\n \"Could not parse response from course request as JSON: '{0}'\".format(\r\n response.content))\r\n\r\n # This will occur if the course identifier is not unique\r\n if err is not None:\r\n raise CourseFixtureError(\"Could not create course {0}. Error message: '{1}'\".format(self, err))\r\n\r\n if not response.ok:\r\n raise CourseFixtureError(\r\n \"Could not create course {0}. Status was {1}\".format(\r\n self._course_dict, response.status_code))",
"def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'import-course', {'key': None}):\n return\n\n if not CourseOutlineRights.can_edit(self):\n transforms.send_json_response(self, 401, 'Access denied.', {})\n return\n\n payload = request.get('payload')\n course_raw = transforms.json_to_dict(\n transforms.loads(payload), self.SCHEMA_DICT)['course']\n\n source = None\n for acourse in sites.get_all_courses():\n if acourse.raw == course_raw:\n source = acourse\n break\n\n if not source:\n transforms.send_json_response(\n self, 404, 'Object not found.', {'raw': course_raw})\n return\n\n course = courses.Course(self)\n errors = []\n try:\n course.import_from(source, errors)\n except Exception as e: # pylint: disable-msg=broad-except\n logging.exception(e)\n errors.append('Import failed: %s' % e)\n\n if errors:\n transforms.send_json_response(self, 412, '\\n'.join(errors))\n return\n\n course.save()\n transforms.send_json_response(self, 200, 'Imported.')",
"def create_course(course_request: schemas.CourseRequest, db: Session = Depends(get_db)):\n\n try:\n course = crud.course.create(db, obj_in=course_request)\n\n except Exception as error:\n logger.error(f'Error when creating a course: {error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')\n\n return course"
]
| [
"0.76697034",
"0.69201535",
"0.69193166",
"0.6838468",
"0.682473",
"0.6684611",
"0.66294444",
"0.6570262",
"0.6566537",
"0.65445465",
"0.6534536",
"0.64993685",
"0.6451552",
"0.64059365",
"0.6341957",
"0.6327892",
"0.6260288",
"0.62347114",
"0.61435986",
"0.6129142",
"0.61188996",
"0.6044205",
"0.6031672",
"0.6020229",
"0.602008",
"0.60136366",
"0.599826",
"0.59943926",
"0.595787",
"0.5914835"
]
| 0.69831127 | 1 |
Function to retrieve section from the db | def get_section(self, new_section):
SECTION = """SELECT COUNT(*) FROM Section WHERE id = %s"""
ret = None
#try:
self.db_cursor.execute("""SELECT num_students, comment1, comment2 FROM Section WHERE course_name = %s AND semester = %s AND section_id = %s""",
(new_section.course_name, new_section.semester, new_section.section_id))
c = self.db_cursor.fetchall()
ret = Section()
if c:
ret.num_students = c[0][0]
ret.comment1 = c[0][1]
ret.comment2 = c[0][2]
ret.course_name = new_section.course_name
ret.semester = new_section.semester
ret.section_id = new_section.section_id
else:
ret = None
#except:
# logging.warning("DBAdapter: Error- cannot retrieve section: " + str(new_section.section_id))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sections(self,):\n self.logger.info(\"\\t[+] get_sections [+]\")\n try:\n return self.sections.select().execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")",
"def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section",
"def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section",
"def Section_get(self,sname):\n self.sname = sname \n try:\n data = self.config.items(self.sname)\n logger.info('Department :'+self.sname+' was show.')\n return data\n except Exception as e:\n logger.error(e)\n return 1",
"def parse_get_section(xml_course):\n parse_section = parse_create_section(xml_course)\n query_constraints = {\n \"crn\": parse_section[\"crn\"]\n }\n params = urllib.urlencode({\"where\": json.dumps(query_constraints)})\n connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n connection.connect()\n connection.request(\n \"GET\",\n \"%s?%s\" % (SECTIONS_ENDPOINT, params),\n '',\n {\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n response = json.loads(connection.getresponse().read())\n if response.get(\"results\"):\n return response[\"results\"][0]\n else:\n return None",
"def get_sections():\n return Section.objects.all()",
"def __getitem__(self, section_id):",
"def get_posts_section(sect):\n return Posts.objects.filter(section__section=sect)",
"def read(id):\n db = core.connect()\n return db[id]",
"def _get_section(subpath=\"\"):\n def get_section(self, sort=\"new\", time=\"all\", limit=DEFAULT_CONTENT_LIMIT,\n place_holder=None):\n url_data = {\"sort\": sort, \"time\": time}\n return self.reddit_session._get_content(urljoin(self._url, subpath),\n limit=limit,\n url_data=url_data,\n place_holder=place_holder)\n return get_section",
"def getSection(self, role):\n rel = role.section.all()\n if len(rel):\n return rel[0]\n else:\n return None",
"def get_categs_section(sect):\n return Category.objects.filter(section__section=sect)",
"def get_section(self, section_name):\n section_name = JSONSchema.format_section_name(section_name).lower()\n try:\n return self._sections[section_name]\n except KeyError:\n raise AquaError('No section \"{0}\"'.format(section_name))",
"def section(self):\n return SECTION_NAME_TO_SECTION[self.section_name]",
"def _read_section(self, pointer, nr_of_leads):\n if pointer.id == 1:\n return self._section1(pointer)\n if pointer.id == 2:\n return self._section2(pointer)\n elif pointer.id == 3:\n return self._section3(pointer)\n elif pointer.id == 4:\n return self._section4(pointer)\n elif pointer.id == 5:\n return self._section5(pointer, nr_of_leads)\n elif pointer.id == 6:\n return self._section6(pointer, nr_of_leads)\n elif pointer.id == 7:\n return self._section7(pointer)\n elif pointer.id == 8:\n return self._section8(pointer)\n elif pointer.id == 9:\n return self._section9(pointer)\n elif pointer.id == 10:\n return self._section10(pointer)\n elif pointer.id == 11:\n return self._section11(pointer)\n elif pointer.id == 12:\n return self._section12(pointer)\n elif pointer.id > 12:\n print(\"WARN: Section Id %s is not implemented\" % str(pointer.id))\n return None",
"def __getitem__(self, section):\n result = self.get(section)\n\n if result is None:\n raise KeyError(section)\n\n return result",
"def get_about_section(course_descriptor, field):\n usage_key = course_descriptor.id.make_usage_key(\"about\", field)\n try:\n return modulestore().get_item(usage_key).data\n except ItemNotFoundError:\n return None",
"def getbook():\n #Opens the database file to read the lines\n with open(\"database.txt\", \"r\") as f:\n #Iterates through each line and splits the line into individual strings\n for line in f:\n s=line.strip()\n string=s.split(\":\")\n return(string)",
"def get_sections(uuid: UUID) -> Optional[Dict[str, 'AoE2FileSection']]:\n scenario = store.get_scenario(uuid)\n if scenario:\n return scenario.sections\n return None",
"def select_course_detail(self, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_id = ? AND cs.course_section_id = ?\"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()",
"def get_section(self, section_name: str) -> NetSection:\n return self.sections[section_name]",
"def test_vault_get_vault_section(self):\n pass",
"def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)",
"def get_conf_by_section(self, section):\n try:\n return get_conf(self.conf_file)[section]\n except:\n return None",
"def _get_page_sections(self, sectionNum=None, sectionName=None):\n self.section = {}\n self.sections = [] # list maintains order\n content = self.page.content\n lines = content.split(\"\\n\")\n currentSection = None\n for line in lines:\n if \"==\" in line:\n line = line.replace(\"Edit =\",\"\")\n line = line.replace(\"=\",\"\").lstrip().rstrip()\n self.section[line] = []\n currentSection = line\n self.sections.append(currentSection)\n elif currentSection is not None:\n line = line.lstrip().rstrip()\n self.section[currentSection].append(line)\n else:\n pass\n logger.info(\"Sections in page: \"+str(self.sections))\n # and return some section:\n if sectionNum is not None:\n if sectionNum > len(self.sections) or sectionNum < 0:\n sectionNum = 0\n return self.section[self.sections[sectionNum]]\n elif sectionName is not None:\n pass",
"def get_section(self, section=None, set_section=False, add_section=False, search_in_default_config=None):\r\n section = self._check_section(section, search_in_default_config=search_in_default_config)\r\n return self._cfg.get_section(section=section, set_section=set_section, add_section=add_section)",
"def get_menu_section(id: int):\n menu_section = MenuSection.query.get(id)\n if not menu_section:\n return jsonify(success=False, MenuSection=\"No MenuSection with id={}\".format(id))\n\n result = menu_section_schema.dump(menu_section)\n return jsonify(success=True, MenuSection=result.data)",
"def getSectionIndex(self) -> int:\n ...",
"def _get_section(registry, section, title, hdg_level1=\"#\", hdg_level2=\"=\",\n output_dir=None):\n file_per_topic = output_dir is not None\n lines = [title, hdg_level1 * len(title), \"\"]\n if file_per_topic:\n lines.extend([\".. toctree::\", \" :maxdepth: 1\", \"\"])\n\n topics = sorted(registry.get_topics_for_section(section))\n for topic in topics:\n help = registry.get_detail(topic)\n heading, text = help.split(\"\\n\", 1)\n if not text.startswith(hdg_level2):\n underline = hdg_level2 * len(heading)\n help = \"%s\\n%s\\n\\n%s\\n\\n\" % (heading, underline, text)\n else:\n help = \"%s\\n%s\\n\\n\" % (heading, text)\n if file_per_topic:\n topic_id = _dump_text(output_dir, topic, help)\n lines.append(\" %s\" % topic_id)\n else:\n lines.append(help)\n\n return \"\\n\" + \"\\n\".join(lines) + \"\\n\"",
"def test_get_section_by_crn(self):\n response = self.client.open(\n '/pablokvitca/classdeck-api/1.0.0/section/{crn}'.format(crn=99999),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))"
]
| [
"0.68347156",
"0.6583898",
"0.6583898",
"0.6450547",
"0.6379913",
"0.63227254",
"0.62380296",
"0.6147309",
"0.61408764",
"0.6059794",
"0.6054953",
"0.6021507",
"0.5988981",
"0.59255385",
"0.5904555",
"0.58982235",
"0.57892066",
"0.57753503",
"0.57172996",
"0.5716366",
"0.5697072",
"0.5676446",
"0.56751466",
"0.5629317",
"0.56203663",
"0.55493206",
"0.55322164",
"0.5525604",
"0.5483575",
"0.5465615"
]
| 0.6672911 | 1 |
Function for adding curriculum to the db | def set_curriculum(self, new_curriculum, updating=False):
if not updating:
#Add Curriculum to database:
self.db_cursor.execute("""INSERT INTO Curriculum (name, min_credit_hours, id_in_charge) VALUES (%s, %s, %s)""",
(new_curriculum.name, new_curriculum.min_credit_hours, new_curriculum.id_in_charge))
self.db_connection.commit()
#Add required courses:
arg_list = list(map(lambda r: (new_curriculum.name, r, True), new_curriculum.req_course_names))
self.db_cursor.executemany(
"""INSERT INTO CurriculumListings (curriculum_name, course_name, required) VALUES (%s, %s, %s)""",arg_list)
self.db_connection.commit()
#Add optional courses:
arg_list = list(map(lambda r: (new_curriculum.name, r, False), new_curriculum.opt_course_names))
self.db_cursor.executemany(
"""INSERT INTO CurriculumListings (curriculum_name, course_name, required) VALUES (%s, %s, %s)""",arg_list)
self.db_connection.commit()
#Add curriculum topics:
arg_list = list(map(lambda ct: (new_curriculum.name, ct.topic_id, ct.level, ct.subject_area, ct.time_unit), new_curriculum.cur_topics))
self.db_cursor.executemany(
"""INSERT INTO CurriculumTopics (curriculum_name, topic_id, level, subject_area, time_unit) VALUES (%s, %s, %s, %s, %s)""",arg_list)
self.db_connection.commit()
else:
# Update Curriculum to database:
self.db_cursor.execute(
"""UPDATE Curriculum SET min_credit_hours = %s, id_in_charge = %s WHERE name = %s""",
(new_curriculum.min_credit_hours, new_curriculum.id_in_charge, new_curriculum.name))
self.db_connection.commit()
# update optional courses:
arg_list = list(map(lambda r: (False, r, new_curriculum.name), new_curriculum.opt_course_names))
self.db_cursor.execute(
"""DELETE FROM CurriculumListings WHERE curriculum_name = %s""", (new_curriculum.name,)
)
self.db_connection.commit()
self.db_cursor.executemany(
"""INSERT INTO CurriculumListings (required, course_name, curriculum_name) VALUES (%s, %s, %s)""",
arg_list)
self.db_connection.commit()
# update required courses:
arg_list = list(map(lambda r: (True, r, new_curriculum.name), new_curriculum.req_course_names))
self.db_cursor.executemany(
"""INSERT INTO CurriculumListings (required, course_name, curriculum_name) VALUES (%s, %s, %s)""",
arg_list)
self.db_connection.commit()
# update curriculum topics:
arg_list = list(map(lambda ct: (ct.level, ct.subject_area, ct.time_unit, new_curriculum.name, ct.topic_id),
new_curriculum.cur_topics))
self.db_cursor.execute(
"""DELETE FROM CurriculumTopics WHERE curriculum_name = %s""", (new_curriculum.name,)
)
self.db_connection.commit()
self.db_cursor.executemany(
"""INSERT INTO CurriculumTopics (level, subject_area, time_unit, curriculum_name, topic_id) VALUES (%s, %s, %s, %s, %s)""",
arg_list)
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_curricula(apps, schema_editor):\n Curriculum = apps.get_model('workshops', 'Curriculum')\n data = {\n 'swc-python': dict(\n name='Software Carpentry (shell, git, Python)',\n active=True,\n unknown=False,\n ),\n 'swc-r': dict(\n name='Software Carpentry (shell, git, R)',\n active=True,\n unknown=False,\n ),\n 'swc-es-python': dict(\n name='Software Carpentry in Spanish (shell, git, Python)',\n active=True,\n unknown=False,\n ),\n 'swc-es-r': dict(\n name='Software Carpentry in Spanish (shell, git, R)',\n active=True,\n unknown=False,\n ),\n 'swc-other': dict(\n name='Software Carpentry (other)',\n active=True,\n unknown=False,\n ),\n 'dc-ecology-python': dict(\n name='Data Carpentry (Ecology with Python)',\n active=True,\n unknown=False,\n ),\n 'dc-ecology-r': dict(\n name='Data Carpentry (Ecology with R)',\n active=True,\n unknown=False,\n ),\n 'dc-genomics': dict(\n name='Data Carpentry (Genomics)',\n active=True,\n unknown=False,\n ),\n 'dc-geospatial': dict(\n name='Data Carpentry (Geospatial)',\n active=True,\n unknown=False,\n ),\n 'dc-socsci-python': dict(\n name='Data Carpentry (Social Sciences with Python)',\n active=True,\n unknown=False,\n ),\n 'dc-socsci-r': dict(\n name='Data Carpentry (Social Sciences with R)',\n active=True,\n unknown=False,\n ),\n 'dc-other': dict(\n name='Data Carpentry (other)',\n active=True,\n unknown=False,\n ),\n 'lc': dict(\n name='Library Carpentry',\n active=True,\n unknown=False,\n ),\n 'unknown': dict(\n name=\"Don't know yet\",\n active=True,\n unknown=True,\n ),\n }\n for slug, defaults in data.items():\n Curriculum.objects.get_or_create(\n slug=slug,\n defaults=defaults,\n )",
"def remove_curriculum(self, curriculum):\n DELETE_CURRICULUM = \"\"\"DELETE FROM Curriculum WHERE name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum.\")",
"def set_curriculum_course(self, curriculum_name, course_name, required, updating=True):\n CURRICULUM_COURSE_QUERY = \"\"\"UPDATE CurriculumListings SET required = %s WHERE curriculum_name = %s AND course_name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO CurriculumListings (curriculum_name, course_name, required) VALUES (%s, %s, %s)\"\"\"\n\n if not updating:\n self.db_cursor.execute(\n CURRICULUM_COURSE_QUERY,\n (curriculum_name, course_name, required))\n else:\n self.db_cursor.execute(\n CURRICULUM_COURSE_QUERY,\n (required, curriculum_name, course_name))\n self.db_connection.commit()",
"def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })",
"def create_new_lab(title):\n\n lab = Lab(title=title)\n db.session.add(lab)\n db.session.commit()\n\n return lab",
"def add_payment(keys,values,con,cur):\n \n psql=f\"\"\" insert into payments ({keys}) values {values};\"\"\"\n print('psql')\n print(psql)\n print(psql)\n cur.execute(psql)\n con.commit()",
"def add_to_database(\n cursor: sqlite3.Cursor, characteristic: int, degree: int, nonzero_degrees: str, nonzero_coeffs: str\n):\n cursor.execute(\n \"\"\"\n INSERT INTO polys (characteristic, degree, nonzero_degrees, nonzero_coeffs)\n VALUES (?,?,?,?)\n \"\"\",\n (characteristic, degree, nonzero_degrees, nonzero_coeffs),\n )",
"def add():\n add_form = AddCourseForm(request.form)\n if request.method == 'POST':\n Course.new(name=add_form.name.data,\n owner_id=g.user.id,\n visibility=add_form.visibility.data,\n term=add_form.term.data)\n flash('New course added')\n return redirect(url_for('courses.index'))\n return render_template('courses/add.html', add_form=add_form)",
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def createCourse():\n\tif request.method == 'POST':\n\t\tcname = request.form['cname']\n\t\tcourseterm = request.form['courseterm']\n\t\tcoursepoint = request.form['coursepoint']\n\t\tcoursetype = request.form['coursetype']\t\n\t\tcourseyear = request.form['courseyear']\t\n\t\ttname = request.form['tname']\t\n\t\terror = None\n\n\t\tif not cname:\n\t\t\terror = 'Course name is required.'\n\t\telif not courseterm:\n\t\t\terror = 'Course term is required'\n\t\telif not courseterm:\n\t\t\terror = 'Course point is required'\n\n\t\tif error is not None:\n\t\t\tflash(error)\n\t\telse:\n\t\t\tdb = get_db()\n\t\t\tcur = db.cursor()\n\t\t\tcur.execute(\n\t\t\t\t'INSERT INTO course (cname, courseyear, coursetype, courseterm, coursepoint, tname)'\n\t\t\t\t' VALUES (%s, %s, %s, %s, %s, %s)',\n\t\t\t\t(cname, courseyear, coursetype, courseterm, coursepoint, tname)\n\t\t\t)\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('info.index'))\n\n\treturn render_template('info/createCourse.html')",
"def add():\n prev_courses = Course._file.read_db()\n course_name = input(\"Please, type course name >\")\n # check course for uniqueness/ instantiating blank class with one attribute\n c = Course(course_name)\n if c.is_course_exists():\n print(\"{} is already exists\".format(course_name))\n return\n\n prev_courses[\"courses\"].append({\n \"course_name\": course_name,\n \"teacher\": input(\"Please, type teacher's email >\"),\n \"total_place\": int(input(\"Please, type total enrolled number >\")),\n \"students\": []\n })\n Course._file.write_db(prev_courses)\n print(\"New course - {} is added\".format(course_name))\n return",
"def create_race():\n # create a new race and set user_id to current_user\n user_id = current_user.id\n newRace = Race(user_id=user_id, date=datetime.now())\n\n # update database and commit changes\n db.session.add(newRace)\n db.session.commit()\n return redirect(url_for('setup.race_setup', race_id=newRace.id))",
"def insertarLectura(self, presion1, presion2, presion3, pulsaciones, sesion):\n try:\n cursor = self.__conexion.cursor()\n cursor.execute(\"INSERT INTO LecturasSensores VALUES(?, ?, ?, ?, ?, ?)\", [\n int(time.time()), presion1, presion2, presion3, pulsaciones, sesion])\n self.__conexion.commit()\n cursor.close()\n except sqlite3.Error as error:\n print(\"Error al insertar: \", error)",
"def __init__(self, pendulum, name=\"Pendulum\", dt=0.01):\n super().__init__(name=name)\n self.pendulum = pendulum\n self.dt = dt",
"def add():\n form = forms.JournalForm()\n if form.validate_on_submit():\n models.Journal.create(\n title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n learnt=form.learnt.data,\n resources=form.resources.data)\n flash('Entry has been created', 'success')\n return redirect(url_for('index'))\n return render_template('add.html', form=form)",
"def add_container_to_database(request):\n new_container_name = request.POST['container_name']\n new_container_description = request.POST['container_description']\n new_container_rows = request.POST['container_rows']\n new_container_cols = request.POST['container_columns']\n new_container_location = request.POST['parent_node']\n exisiting_root_node_names = [str(loc.name).lower() for loc in Container.objects.all()]\n if new_container_name.lower() not in exisiting_root_node_names:\n new_container = Container(name=new_container_name, description=new_container_description, owner=request.user,\n rows=new_container_rows, columns=new_container_cols, location_id=new_container_location)\n new_container.save()\n return JsonResponse({'success': True, 'Error': None})\n else:\n return JsonResponse({'success': False, 'Error': 'There was an error with your submission!'})",
"def sr_add_c():\n req_data = request.get_json()\n logging.debug(\"req_data = \" + str(req_data))\n\n product_name = req_data['product_name']\n version_number = req_data['version_number']\n name = req_data['name']\n version = req_data['version']\n destination = req_data['destination']\n\n if destination == \"\":\n destination = \".\"\n\n outcome = {\"name\": \"Fail\"}\n\n try:\n # create new association\n c = Component.query.filter_by(name=name, version=version).first()\n sr = SoftwareRelease.query.filter_by(product_name=product_name, version_number=version_number).first()\n\n a = Association(destination=destination)\n a.component = c\n sr.components.append(a)\n\n db.session.commit()\n outcome['name'] = \"Success\"\n except:\n db.session.rollback()\n raise\n finally:\n db.session.close()\n return jsonify(outcome)",
"def add_db_equipement(self,c):\n insertQuery = \"INSERT INTO Equipement(NumeroEquipement,NumeroInstallation,NatureLibelle,InsNom) VALUES (?,?,?,?)\"\n\n c.execute(insertQuery, (self.equipement_id,self.ins_numero_install,self.nature_libelle,self.ins_nom))",
"def insert_into_solr():\n solr = pysolr.Solr('http://localhost:8983/solr/mag_journals', always_commit=True)\n filepath = '/vol1/mag/data/2018-07-19/dumps/Journals.txt'\n\n list_for_solr = []\n with open(filepath, \"r\") as file:\n csv_reader = csv.reader(file, delimiter='\\t')\n for journal_id, rank, normalized_name, display_name, issn, publisher, webpage, paper_count, citation_count, created_date in csv_reader:\n solr_record = {}\n solr_record['journal_id'] = journal_id\n solr_record['rank'] = rank\n solr_record['normalized_name'] = normalized_name\n solr_record['display_name'] = display_name\n solr_record['issn'] = issn\n solr_record['publisher'] = publisher\n solr_record['webpage'] = webpage\n solr_record['paper_count'] = paper_count\n solr_record['citation_count'] = citation_count\n solr_record['created_date'] = created_date\n list_for_solr.append(solr_record)\n # Upload to Solr: 48000-odd rows\n solr.add(list_for_solr)",
"def create_place():\n\n q = \"\"\"\n INSERT INTO escuelasutf8 (nombre, direccion, localidad,\n wkb_geometry_4326,\n id_distrito, id_seccion)\n VALUES ('%s', '%s', '%s', '%s', '%s', '%s')\n RETURNING ogc_fid\n \"\"\" % (\n request.form['nombre'].replace(\"'\", \"''\"),\n request.form['direccion'].replace(\"'\", \"''\"),\n request.form['localidad'].replace(\"'\", \"''\"),\n request.form['wkb_geometry_4326'],\n request.form['distrito'],\n request.form['seccion']\n )\n r = db.query(q)\n return flask.Response(flask.json.dumps(r.next()),\n mimetype=\"application/json\")",
"def init():\n logging.info(\"Creating DB\")\n db.drop_all()\n db.create_all()\n\n # season = Season(title='ZS 2020/2021',\n # start_date=date(year=2020, month=11, day=12),\n # end_date=date(year=2020, month=12, day=20))\n season = Season(title='ZS 2020/2021',\n start_date=date(year=2020, month=11, day=1),\n end_date=date(year=2020, month=12, day=20))\n db.session.add(season)\n db.session.flush()\n\n part = ChallengePart(season_id=season.id,\n order=0,\n target='MFF, Ke Karlovu 3',\n distance=0)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=1,\n target='Hrčava (nejvýchodnější bod ČR)',\n distance=500)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=2,\n target='Nová Sedlica (nejvýchodnější bod SR)',\n distance=388)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=3,\n target='Oravská Polhora (nejsevernější bod SR)',\n distance=324)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=4,\n target='Varšava',\n distance=518)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=5,\n target='Gdaňsk',\n distance=446)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=6,\n target='Berlín',\n distance=685)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=7,\n target='Brémy',\n distance=472)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=8,\n target='Bremenhaven',\n distance=94)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=9,\n target='Amsterdam',\n distance=461)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=10,\n target='Brussel',\n distance=260)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=11,\n target='Lucemburk',\n distance=269)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=12,\n target='Paříž',\n distance=352)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=13,\n target='Bern',\n distance=724)\n db.session.add(part)\n\n part = ChallengePart(season_id=season.id,\n order=14,\n target='Monako',\n distance=650)\n db.session.add(part)\n\n db.session.commit()",
"def showcurriculum(request, c_id):\n\n # Current user\n current_user = get_object_or_404(Member, u_id=request.user)\n\n # Shitty solution for now\n if not Curriculum.objects.filter(id=c_id):\n return render(request, 'registration/login.html', {})\n\n # Fetch curriculum with id=c_id\n curriculum = get_object_or_404(Curriculum, id=c_id)\n\n # Fetch all the institutions teaching the curriculum\n institutions_teaching = [\n teach.member.institution for teach in Teach.objects.filter(curriculum=curriculum)]\n\n # Fetch all the subscribers for the curriculum\n subscribed_users = [\n sub.member for sub in Subscription.objects.filter(curriculum=curriculum)]\n\n # Fetch all the contributors for the curriculum\n contributors = [contributors.member for contributors in Contributor.objects.filter(curriculum=curriculum)]\n\n u_obj = Upvote.objects.filter(\n member=current_user, changelog=None, bit=None, curriculum=curriculum)\n curriculum.is_upvoted = len(u_obj) > 0\n\n # Check if user is subscribed to the curriculum with id=c_id\n user_subscription = Subscription.objects.filter(\n member=current_user, curriculum=curriculum, subject__isnull=True).exclude(curriculum__isnull=True)\n\n # Fetch all curriculums taught by user for the subject of\n # current curriculum. (Shouldn't be more than 1)\n user_teach = Teach.objects.filter(\n member=current_user, subject=curriculum.subject)\n\n if request.method == 'POST' and '_subscribe' in request.POST:\n \"\"\"\n Filtering user subscription by only\n allowing records with curriculums not\n being NULL and subjects being NULL\n\n \"\"\"\n # Teach button\n if user_teach:\n if user_teach.first().curriculum.id == c_id:\n teach_button_status = 'UnTeach'\n else:\n teach_button_status = 'Teach'\n else:\n teach_button_status = 'Teach'\n\n context = {'curriculum': curriculum,\n 'sub_status': sub_status,\n 'subscribe_button_status': subscribe_button_status,\n 'current_user': current_user,\n 'teach_button_status': teach_button_status,\n 'institutions_teaching': institutions_teaching,\n 'subscribed_users': subscribed_users,\n 'contributors': contributors,\n }\n return render(request, 'curriculum/show.html', context)\n\n elif request.method == 'POST' and '_teach' in request.POST:\n \"\"\"\n Filtering teach for current user for the\n associated subject. 
User is only allowed to\n follow one curriculum from one subject.\n\n \"\"\"\n if user_teach:\n if user_teach.first().curriculum.id == c_id:\n user_teach.delete()\n\n # Updating Change Log for the change\n reason = str(current_user.full_name) + ' is not teaching ' + \\\n str(curriculum.title) + ' at their university - ' + \\\n str(current_user.institution) + ' anymore.'\n\n log_obj = ChangeLog(\n member=current_user,\n description=reason,\n curriculum=curriculum,\n bit=None,\n subject=None,\n operation=None,\n )\n log_obj.save()\n\n teach_status = 'Not Teaching Anymore!'\n teach_button_status = 'Teach'\n\n else:\n user_teach.delete()\n\n # Adding the curriculum for teaching\n teach_obj = Teach(\n member=current_user,\n curriculum=curriculum,\n subject=curriculum.subject\n )\n teach_obj.save()\n\n # Updating Change Log for the change\n reason = str(current_user.full_name) + ' is teaching ' + \\\n str(curriculum.title) + ' at their university - ' + \\\n str(current_user.institution)\n\n log_obj = ChangeLog(\n member=current_user,\n description=reason,\n curriculum=curriculum,\n bit=None,\n subject=None,\n operation=None,\n )\n log_obj.save()\n\n teach_status = 'Teaching!'\n teach_button_status = 'UnTeach'\n\n else:\n # Adding the curriculum for teaching\n teach_obj = Teach(\n member=current_user,\n curriculum=curriculum,\n subject=curriculum.subject\n )\n teach_obj.save()\n\n # Updating Change Log for the change\n reason = str(current_user.full_name) + ' is teaching ' + \\\n str(curriculum.title) + ' at their university - ' + \\\n str(current_user.institution)\n\n log_obj = ChangeLog(\n member=current_user,\n description=reason,\n curriculum=curriculum,\n bit=None,\n subject=None,\n operation=None,\n )\n log_obj.save()\n\n teach_status = 'Teaching!'\n teach_button_status = 'UnTeach'\n\n # Subscribe button\n if user_subscription:\n subscribe_button_status = 'Unsubscribe'\n else:\n subscribe_button_status = 'Subscribe'\n\n institutions_teaching = [ teach.member.institution for teach in Teach.objects.filter(curriculum=curriculum)]\n context = {'curriculum': curriculum,\n 'teach_status': teach_status,\n 'teach_button_status': teach_button_status,\n 'subscribe_button_status': subscribe_button_status,\n 'current_user': current_user,\n 'institutions_teaching': institutions_teaching,\n 'subscribed_users': subscribed_users,\n 'contributors': contributors,\n }\n return render(request, 'curriculum/show.html', context)\n\n else:\n \"\"\"\n Assuming the other request would be\n GET request to load the page, placeholder\n text for button can be set\n\n \"\"\"\n\n # Teach button\n if user_teach:\n if user_teach.first().curriculum.id == c_id:\n teach_button_status = 'UnTeach'\n else:\n teach_button_status = 'Teach'\n else:\n teach_button_status = 'Teach'\n\n context = {'curriculum': curriculum,\n 'user_subscription': user_subscription.first(),\n 'teach_button_status': teach_button_status,\n 'current_user': current_user,\n 'institutions_teaching': institutions_teaching,\n 'subscribed_users': subscribed_users,\n 'contributors': contributors,\n }\n return render(request, 'curriculum/show.html', context)",
"def insert_evaluation(connection, evaluation):\n insert_eval = \"\"\"INSERT INTO evaluations(scale_id,corrector_id,corrector_login,\n corrected_id,corrected_login,project_name,\n project_id,rule,begin_at) VALUES(?,?,?,?,?,?,?,?,?)\"\"\"\n try:\n cursor = connection.cursor()\n cursor.execute(insert_eval, evaluation)\n connection.commit()\n except sqlite3.Error as e:\n print(e)",
"def add_car():\n\n make = request.form[\"make\"]\n bodytype = request.form[\"bodytype\"] \n color = request.form[\"color\"]\n seats = request.form[\"seats\"]\n location = request.form[\"location\"]\n costperhour = request.form[\"costperhour\"]\n\n # create a new Car object.\n new_car = Car(make=make, bodytype=bodytype, color=color, seats=seats, location=location, costperhour=costperhour)\n\n # add new car to db\n db.session.add(new_car)\n # commit the new add.\n db.session.commit()\n\n return carSchema.jsonify(new_car)",
"def add_car(matricula, posicio, color, marca):\n global max\n con = lite.connect('parking.db')\n cur = con.cursor()\n if(_formatMatriculaValid(matricula)):\n if(max!=0):\n try:\n cur.execute(\"INSERT INTO cotxes(id_cotxe, color, marca) values (?,?,?);\", (matricula, color, marca))\n cur.execute(\"INSERT INTO parking(id_cotxe, placa, entrada) values (?,?, DATETIME('now'));\",(matricula, posicio))\n con.commit()\n max -=1\n except lite.IntegrityError:\n print \"Error.\"\n else:\n print\"Parking ple. El cotxe\",matricula,\"no ha pogut entrar.\"\n else:\n print(\"Format matricula invalid.\")\n con.close()",
"def save_trajectory(trajectory):\n query = \"INSERT INTO trajectories (idUser, startTime, endTime) VALUES (%(id_user)s, %(start_time)s, %(end_time)s)\"\n\n trajectory_data = {\n 'id_user': trajectory.id_user,\n 'start_time': trajectory.start_time,\n 'end_time': trajectory.end_time\n }\n\n inserted_id = qr.run_write_query(query, trajectory_data)\n trajectory.id = inserted_id",
"def add_course(race_id):\n # use the CourseForm\n form = CourseForm()\n\n # check if this is a post request and all fields are valid\n if form.validate_on_submit():\n # get race object from database\n race = Race.query.get(race_id)\n host = race.host_school\n location = race.location\n\n # create a new course based on input on form from user\n course = Course(name=form.course_name.data,\n description = form.course_description.data,\n distance = form.distance.data,\n location_id=location.id)\n\n # add course to database and commit changes\n db.session.add(course)\n db.session.commit()\n\n # set course for race to the newly created course and commit changes\n race.course_id = course.id\n db.session.commit()\n\n # set flash to notify user that course setup was successfull\n flash(f\"Successfullly added course: '{course.name}' at location:\"\n f\"'{location.name}'\", 'success')\n\n # return to race_setup.html\n return redirect(url_for('setup.race_setup', race_id=race_id))\n\n # if this is get request, render add_course.html\n return render_template('add_course.html', form=form)",
"def before_request():\r\n g.db = mysql.connection.cursor()\r\n g.db.execute('CREATE TABLE IF NOT EXISTS monthly_data \\\r\n \t(Sr INT PRIMARY KEY AUTO_INCREMENT, Name TEXT,id TEXT, Item TEXT, Date TEXT, Price REAL)')",
"def add_course(self, term, schedule, crn):\n query = {'Term': term.code,\n 'Schedule': schedule,\n 'CourseID': crn,\n 'ShowDebug': 0,\n '_': int(float(time.time()) * 10**3)}\n\n self.get(self.ADD_COURSE_ENDPOINT, params=query)",
"def add_location(race_id):\n # use the LocationCourseForm\n form = LocationCourseForm()\n\n # confirm this is a post request and all fields are valid\n if form.validate_on_submit():\n # get race from database\n race = Race.query.get(race_id)\n host = race.host_school\n\n # create location and update database\n location = Location(\n name = form.location_name.data,\n street_address = (f\"{form.street_number.data} \"\n f\"{form.street_name.data}\"),\n city = form.city.data,\n state_abbr = form.state_abbr.data,\n zip = form.zip.data\n )\n db.session.add(location)\n db.session.commit()\n host.locations.append(location)\n db.session.commit()\n\n # create course and update database\n course = Course(name=form.course_name.data,\n description = form.course_description.data,\n distance = form.distance.data,\n location_id=location.id\n )\n db.session.add(course)\n db.session.commit()\n race.location_id = location.id\n race.course_id = course.id\n db.session.commit()\n\n # set flash to notify user that location/course setup was successfull\n flash(f\"Successfullly added course: '{course.name}' at location:\"\n f\"'{location.name}'\", 'success')\n\n # return to race_setup.html\n return redirect(url_for('setup.race_setup', race_id=race_id))\n\n # if this is get request, render add_location.html\n return render_template('add_location.html', form=form)"
]
| [
"0.63018584",
"0.6080083",
"0.5960781",
"0.56396157",
"0.5505915",
"0.542898",
"0.5381422",
"0.53558385",
"0.5339416",
"0.5280498",
"0.52722335",
"0.5269664",
"0.52507955",
"0.5232106",
"0.52166164",
"0.52114844",
"0.5197261",
"0.5188744",
"0.51863056",
"0.5172717",
"0.5084438",
"0.5068446",
"0.5067328",
"0.5063899",
"0.5062684",
"0.50554603",
"0.5036578",
"0.5024892",
"0.5004967",
"0.4998392"
]
| 0.6972953 | 0 |
Retrieves all descriptions of a Goal, potentially across the context of multiple curricula and returns a description | def fetch_goal_context_description(self, goalId):
GET_ALL_CONTEXTS = """SELECT curriculum_name, description FROM Goal WHERE id = %s """
try:
self.db_cursor.execute(GET_ALL_CONTEXTS,(goalId,))
descriptions = self.db_cursor.fetchall()
if len(descriptions) <= 0:
return None
ret_str = []
ret_str.append(f"Goal #{goalId}\n")
ret_str.append(f"From {descriptions[0][0]}: {descriptions[0][1]}")
if len(descriptions) > 1:
ret_str.append(" (and more)")
return ''.join(ret_str)
except:
logging.warning("DBAdapter: Error- cannot retrieve goal contexts from id: " + str(goalId))
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_description(self):",
"def cal_desc(self):\n desc = \"\"\n desc += \"Requested by \"\n orgs = self.org.all()\n if len(orgs) > 0:\n for org in orgs:\n desc += org.name + \", \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n ccs = self.ccinstances.all()\n if len(ccs) > 0:\n desc += \"Crew Chiefs: \"\n for cc in ccs:\n desc += cc.crew_chief.get_full_name() + \" [\" + (cc.service.shortname if cc.service else cc.category.name) + \"], \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n if self.description:\n desc += self.description + \"\\n\"\n return desc",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True, \"cache\": 600}\n today = datetime.date.today()\n desc[\"arguments\"] = [\n dict(\n type=\"csector\",\n name=\"csector\",\n default=\"IA\",\n label=\"Select state/sector to plot\",\n ),\n dict(\n type=\"date\",\n name=\"sdate\",\n default=f\"{today.year}/01/01\",\n label=\"Start Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"date\",\n name=\"edate\",\n default=today.strftime(\"%Y/%m/%d\"),\n label=\"End Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"select\",\n name=\"d\",\n default=\"0\",\n options=PDICT,\n label=\"Select Drought Classification (at and above counted):\",\n ),\n dict(\n type=\"select\",\n name=\"w\",\n default=\"percent\",\n options=PDICT2,\n label=\"How to express time for plot:\",\n ),\n dict(type=\"cmap\", name=\"cmap\", default=\"plasma\", label=\"Color Ramp:\"),\n ]\n return desc",
"def description():",
"def get_description():\n raise NotImplementedError",
"def description(self):",
"def get_description():\r\n return{\"I'll never yield!\":\"Grants a shield.\",\r\n \"Stay still!\":\"Affected unit cannot act in their turn.\"\r\n }",
"def get_description(self):\n pass",
"def get_description():\n desc = dict()\n desc['cache'] = 86400\n desc['data'] = True\n desc['description'] = \"\"\"This chart presents a histogram of issuance times\n for a given watch, warning, or advisory type for a given office.\"\"\"\n desc['arguments'] = [\n dict(type='networkselect', name='station', network='WFO',\n default='DMX', label='Select WFO:'),\n dict(type='phenomena', name='phenomena',\n default='WC', label='Select Watch/Warning Phenomena Type:'),\n dict(type='significance', name='significance',\n default='W', label='Select Watch/Warning Significance Level:'),\n ]\n return desc",
"def Description(self) -> str:",
"def Description(self) -> str:",
"def cal_desc(self):\n desc = ''\n desc += 'Requested by '\n orgs = self.event.org.all()\n for org in orgs:\n desc += org.name + ', '\n desc = desc[:-2] + '.\\n' # removes trailing comma\n desc += 'Crew Chief: ' + self.crew_chief.get_full_name() + '\\n'\n if self.event.description:\n desc += self.event.description + '\\n'\n return desc",
"def _generateDescription(self, obj, **args):\n result = []\n if obj.description:\n label = self._script.utilities.displayedLabel(obj) or \"\"\n name = obj.name or \"\"\n desc = obj.description.lower()\n if not (desc in name.lower() or desc in label.lower()):\n result.append(obj.description)\n return result",
"def get_description(self):\n raise NotImplementedError",
"def get_description(self):\n if self._visited is False:\n self._visited = True\n return self._desc\n else:\n return self._sdesc",
"def get_description(self) -> str:\n pass",
"def get_description(soup):\n\n standard_head = ['Description', 'Getting There', 'Protection', 'Location']\n\n # grab all h3 orange header sections on the page\n detail = {}\n other_text = []\n for h3 in soup.find_all('h3', { 'class': \"dkorange\" }):\n \n # text is the element after the h3\n body = h3.next_sibling\n \n if isinstance(body, NavigableString):\n # ignore sections from here on like 'Climbing Season' and such\n break\n else:\n # these are the valuable text sections\n body = body.get_text()\n body = body.encode('utf-8', errors = 'ignore')\n head = h3.get_text().encode('utf-8', errors = 'ignore')\n head = head.strip('\\xc2\\xa0')\n\n if head in standard_head:\n head = head.replace(' ','_').lower()\n detail[head] = body\n else:\n other_text.append(body)\n\n # combine text into a full description\n if len(other_text) > 0:\n if 'description' in detail:\n # combine description with other text -- questionable but appropriate\n detail['description'] = detail['description'] + '\\n'.join(other_text)\n else:\n detail['description'] = '\\n'.join(other_text)\n\n # blank if there is no text at all\n if 'description' not in detail:\n detail['description'] = ''\n\n return detail",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n today = datetime.date.today()\n thisyear = today.year\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n options=PDICT,\n name=\"var\",\n default=\"precip\",\n label=\"Accumulate Precipitation or Snow?\",\n ),\n dict(\n type=\"year\",\n name=\"year1\",\n default=thisyear,\n label=\"Additional Year to Plot:\",\n ),\n dict(\n type=\"year\",\n name=\"year2\",\n optional=True,\n default=(thisyear - 1),\n label=\"Additional Year to Plot: (optional)\",\n ),\n dict(\n type=\"year\",\n name=\"year3\",\n optional=True,\n default=(thisyear - 2),\n label=\"Additional Year to Plot: (optional)\",\n ),\n dict(\n type=\"sday\",\n name=\"sdate\",\n default=\"0101\",\n label=\"Start Day of Year for Plot:\",\n ),\n dict(\n optional=True,\n type=\"sday\",\n name=\"edate\",\n default=f\"{today:%m%d}\",\n label=\"End Day of Year for Plot:\",\n ),\n dict(\n type=\"int\",\n default=\"3\",\n label=\"Number of missing days to allow before excluding year\",\n name=\"m\",\n ),\n ]\n return desc",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"var\",\n default=\"spi\",\n options=PDICT,\n label=\"Select which metric to plot:\",\n ),\n dict(\n type=\"select\",\n name=\"c\",\n default=\"ncei91\",\n options=PDICT2,\n label=\"Which climatology to use for averages:\",\n ),\n dict(\n type=\"int\",\n name=\"days\",\n default=90,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n ]\n return desc",
"def GetAllDescriptionOfCost():\n\n logs.logger.debug(\n \"Start to get back all descriptions of Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all descriptions of Cost objects from database.\")\n return [CostItems.description for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def test_with_multiple_descriptions():\n soup = generate_case(\"with_descriptions\")\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Exact address\",\n \"Exact address\",\n \"Delivery info depending on the delivery type\",\n \"The delivery is a gift, no prices displayed\",\n ],\n )",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n dt = datetime.date.today() - datetime.timedelta(days=1)\n desc[\"arguments\"] = [\n {\n \"type\": \"select\",\n \"name\": \"mode\",\n \"default\": \"normal\",\n \"label\": \"Application Mode:\",\n \"options\": PDICT,\n },\n dict(\n type=\"state\",\n name=\"state\",\n default=\"IA\",\n label=\"Select State:\",\n ),\n dict(\n type=\"date\",\n name=\"date\",\n default=dt.strftime(\"%Y/%m/%d\"),\n label=\"Retroactive Date of Plot\",\n ),\n dict(\n type=\"int\",\n name=\"d1\",\n default=30,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n dict(\n type=\"int\",\n name=\"d2\",\n default=60,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n dict(\n type=\"int\",\n name=\"d3\",\n default=90,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n ]\n return desc",
"def getDescription(self):\n raise NotImplementedError",
"def description_mega(self, html): # pylint: disable=too-many-statements,too-many-branches\n description_list = []\n with suppress(Exception):\n '''\n Tested on\n * https://economictimes.indiatimes.com/news/economy/policy/government-mops-up-rs-8660-cr-from-disinvestment-in-02/articleshow/33105933.cms\n <meta content=\"The total disinvestment realisation of the government during 2002 topped Rs 8,660 crore. The cabinet committee on disinvestment (CCD) had cleared transactions worth Rs 6,168 crore during the year.\" name=\"description\">\n * https://timesofindia.indiatimes.com/city/bengaluru/ISROs-second-launch-pad-to-be-sent-by-March-end/articleshow/3801270.cms\n <meta name=\"description\" content=\"BANGALORE: The second launch pad for the Indian Space Research Organisation will be dispatched to Sriharikota by the end of March. The Mobile Launch P\">\n '''\n meta_name_description = html.find('meta', {'name': 'description'})\n description_list.append(\n self.text_cleaning(meta_name_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.deccanherald.com/content/1368/agriculture-department-urged-regulate-fertilisers.html\n <meta property=\"og:description\" content=\"Farmers will be happy only if they get good rains and sufficient fertilisers. They were is deep trouble due to the improper supply of fertilisers.\">\n * https://sports.ndtv.com/cricket/we-cant-influence-indian-high-commission-for-visas-pcb-1594242\n <meta property=\"og:description\" content=\"Pakistan Cricket Board made it clear that it had done everything under its power to get the visas for its cricketers to play in the IPL next year.\">\n '''\n meta_property_og_description = html.find(\n 'meta', {'property': 'og:description'})\n description_list.append(\n self.text_cleaning(meta_property_og_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.independent.co.uk/news/world/americas/elijah-mcclain-death-colorado-police-black-lives-matter-george-floyd-police-a9584366.html\n <meta name=\"twitter:description\" content=\"'Demand these officers are taken off duty, and that a more in-depth investigation is held', page reads\">\n * https://nypost.com/2010/09/27/brooklyn-tea-party-rallies-against-ground-zero-mosque-multiculturalism/\n <meta name=\"twitter:description\" content=\"About 125 people gathered at a recent Bay Ridge rally of the Brooklyn Tea Party to protest a variety of hot subjects — especially the planned Ground Zero mosque, according to a Brooklyn Ink\">\n '''\n meta_name_twitter_description = html.find(\n 'meta', {'name': 'twitter:description'})\n description_list.append(\n self.text_cleaning(meta_name_twitter_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.standard.co.uk/news/uk/boris-johnson-u-turn-free-school-meals-marcus-rashford-a4470506.html\n <meta property=\"twitter:description\" content=\"'THIS is England in 2020'\">\n * https://www.express.co.uk/news/politics/1369685/brexit-news-uk-eu-trade-deal-france-fishing-emmanuel-macron-no-deal-latest\n <meta property=\"twitter:description\" content=\"FRENCH fishermen have lashed out at Emmanuel Macron, warning he is playing a "dangerous game" and has "overstepped the mark" by threatening to veto a post-Brexit trade deal with the UK.\">\n '''\n meta_property_twitter_desc = html.find(\n 'meta', {'property': 'twitter:description'})\n description_list.append(\n self.text_cleaning(meta_property_twitter_desc['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * 
https://www.indiatoday.in/india/story/pm-modi-launch-covid-vaccination-drive-jan-16-cowin-app-coronavirus-covaxin-covishield-1758628-2021-01-13\n <meta itemprop=\"description\" content=\"Prime Minister Narendra Modi will kickstart the Covid-19 vaccination programme in India with a virtual launch on January 16, sources have told India Today.\">\n * https://indianexpress.com/article/world/print/four-killed-as-armed-militants-storm-5-star-hotel-in-pakistans-gwadar-port-city-police-5723193/\n <meta itemprop=\"description\" content=\"A shootout between the militants and the security forces broke out at the hotel as the anti-terrorism force, the Army and the Frontier Corps were called in, Gwadar Station House Officer (SHO) Aslam Bangulzai said.\">\n '''\n meta_itemprop_description = html.find('meta',\n {'itemprop': 'description'})\n description_list.append(\n self.text_cleaning(meta_itemprop_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.cnbc.com/2020/12/25/the-plant-based-meat-industry-is-on-the-rise-but-challenges-remain.html\n <meta itemprop=\"description\" name=\"description\" content=\"Demand for meat alternatives has grown and will continue to rise, but the industry still has hurdles to overcome in different parts of the world, analysts said.\">\n * https://www.oneindia.com/india/congress-leader-dk-shivakumar-to-appear-before-cbi-in-disproportionate-assets-case-today-3180984.html\n <meta name=\"description\" itemprop=\"description\" content=\"On October 5, the CBI conducted raids at 14 locations, including in Karnataka, Delhi and Mumbai at the premises belonging to Shivakumar and others, and recovered Rs 57 lakh cash and several documents, including property documents, bank related information, computer hard disk. \">\n '''\n meta_name_itemprop_description = html.find(\n 'meta', {\n 'name': 'description',\n 'itemprop': 'description'\n })\n description_list.append(\n self.text_cleaning(meta_name_itemprop_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://scroll.in/field/979390/they-can-beat-australia-in-their-own-den-shastri-backs-india-s-fabulous-five-quicks-to-shine\n <meta name=\"dcterms.description\" content=\"The India coach said his team’s pace unit was the best in the world, despite being likely to be without the injured Ishant Sharma.\">\n * https://scroll.in/field/979393/champions-league-last-gasp-wins-take-juventus-chelsea-and-sevilla-into-last-16-barcelona-cruise\n <meta name=\"dcterms.description\" content=\"They are the first teams to make it out of the group stage, doing so with two games to spare.\">\n '''\n meta_name_dcterms_description = html.find(\n 'meta', {'name': 'dcterms.description'})\n description_list.append(\n self.text_cleaning(meta_name_dcterms_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.express.co.uk/news/weather/1370081/BBC-Weather-Europe-snow-forecast-cold-December-update-video-vn\n <div class=\"text-description\"><p><span>BBC Weather meteorologist Stav Danaos forecast unsettled weather across the </span><span>Mediterranean for the rest of the week. 
He added a blocking area of high pressure across Russia was contributing to the unsettling weather.</span></p></div>\n * https://www.express.co.uk/news/politics/1383306/Brexit-live-latest-brexit-deal-Northern-Ireland-customs-boris-johnson-john-redwood\n <div class='text-description'><p>Earlier today, Boris Johnson suggested some fishing businesses in Scotland would receive compensation as he defended...</p></div>\n '''\n div_class_text_description = html.find(\n 'div', {'class': 'text-description'})\n description_list.append(\n self.text_cleaning(div_class_text_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.usatoday.com/story/news/nation/2020/12/07/north-atlantic-right-whale-endangered-species-newborns/6484190002/\n <div...data-ss-d=\"Two North Atlantic right whale newborns have been spotted in the last week at the start of calving season, providing hope for an endangered species.\"...>\n * https://www.usatoday.com/story/sports/mls/2020/12/07/mls-cup-2020-seattle-sounders-advance-play-columbus-crew-title/6487291002/\n <div...data-ss-d=\"The Seattle Sounders scored two late goals to complete a dramatic rally over Minnesota United and advance to MLS Cup to play the Columbus Crew.\"...>\n '''\n div_data_ssd = html.find('div', {'data-ss-d': True})\n description_list.append(\n self.text_cleaning(div_data_ssd['data-ss-d']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.indiatoday.in/technology/news/story/amazon-great-republic-day-sale-announced-from-january-20-deals-bank-offers-and-more-1758622-2021-01-13\n <div class=\"story-kicker\"><h2>Amazon's Great Republic Day Sale begins January 20 but Prime members will get 24 hours early access on deals.</h2></div>\n * https://www.indiatoday.in/sports/cricket/story/a-win-at-gabba-will-give-india-their-greatest-test-series-victory-ever-says-akhtar-1758619-2021-01-13\n <div class=\"story-kicker\"><h2>Former Pakistan fast bowler Shoaib Akhtar lauded India for the fight they have shown in the series so far and said that they should go on to win the final Test in Brisbane.</h2></div>\n '''\n div_class_story_kicker = html.find('div',\n {'class': 'story-kicker'})\n description_list.append(\n self.text_cleaning(div_class_story_kicker.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.espncricinfo.com/story/vitality-t20-blast-mitchell-claydon-misses-sussex-s-t20-blast-defeat-after-hand-sanitiser-ball-tampering-ban-1234150\n <p class=\"article-summary\">Seamer will miss first two games of 2021 as well after nine-match ban imposed by CDC</p>\n * https://www.espncricinfo.com/series/vitality-blast-2020-1207645/nottinghamshire-vs-leicestershire-1st-quarter-final-1207789/match-report\n <p class=\"article-summary\">Nottinghamshire progress on higher Powerplay score after securing dramatic tie off last ball</p>\n '''\n p_class_article_summary = html.find('p',\n {'class': 'article-summary'})\n description_list.append(\n self.text_cleaning(p_class_article_summary.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.nytimes.com/2020/01/31/learning/is-it-offensive-for-sports-teams-and-their-fans-to-use-native-american-names-imagery-and-gestures.html\n <p id=\"article-summary\" class=\"css-w6ymp8 e1wiw3jv0\">The Kansas City Chiefs will face the San Francisco 49ers for Super Bowl LIV. 
Chiefs fans regularly use a “tomahawk chop” to urge on their beloved team: Is it offensive?</p>\n * https://www.nytimes.com/2020/01/09/world/middleeast/iran-plane-crash-ukraine.html\n <p id=\"article-summary\" class=\"css-w6ymp8 e1wiw3jv0\">Western intelligence showed that Iran was responsible for the plane crash, suggesting that the deaths of those aboard were a consequence of the heightened tensions between Washington and Iran. </p>\n '''\n p_id_article_summary = html.find('p', {'id': 'article-summary'})\n description_list.append(\n self.text_cleaning(p_id_article_summary.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://economictimes.indiatimes.com/industry/services/education/indian-universities-look-abroad-for-success-at-home/articleshow/5957175.cms\n <h2 class=\"summary\">Foreign universities may soon be able to set up in India but some of their Indian counterparts are looking in the other direction — to better equip students for the demands of the global economy.</h2>\n * https://economictimes.indiatimes.com/industry/transportation/railways/conviction-rate-in-theft-cases-in-central-railways-mumbai-division-falls-steeply/articleshow/48554953.cms\n <h2 class=\"summary\">According to official data, the conviction rate in theft cases of railway properties has witnessed a steep fall in Mumbai Division of Central Railway.</h2>\n '''\n h2_class_summary_description = html.find('h2',\n {'class': 'summary'})\n description_list.append(\n self.text_cleaning(h2_class_summary_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://sports.ndtv.com/india-vs-england-2020-21/ind-vs-eng-virat-kohli-reflects-on-battling-depression-during-2014-england-tour-2373999\n <h2 class=\"sp-descp\">India vs England: Virat Kohli opened up about dealing with depression on India's 2014 tour of England where Kohli endured a horror run with the bat.</h2>\n * https://sports.ndtv.com/cricket/we-cant-influence-indian-high-commission-for-visas-pcb-1594242\n <h2 class=\"sp-descp\">Pakistan Cricket Board made it clear that it had done everything under its power to get the visas for its cricketers to play in the IPL next year.</h2>\n '''\n h2_class_sp_descp_description = html.find('h2',\n {'class': 'sp-descp'})\n description_list.append(\n self.text_cleaning(h2_class_sp_descp_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://indianexpress.com/article/news-archive/days-are-not-far-when-kashmiri-pandits-would-return-to-their-homes-with-dignity-jk-bjp-4842449/\n <h2 itemprop=\"description\" class=\"synopsis\">\"Those days are not far when the displaced people will return to their Kashmir with dignity and honour. The BJP will leave no stone unturned in solving the problems of the hapless people who were forced to leave the Valley,\" Jammu and Kashmir BJP unit chief Sat Sharma said. 
</h2>\n * https://indianexpress.com/article/india/web/bjp-mp-karandlaje-challenges-karnataka-cm-siddaramaiah-govt-to-arrest-her-4996043/\n <h2 itemprop=\"description\" class=\"synopsis\">An FIR was filed against BJP MP Shobha Karandlaje on charges of provoking people to cause riots, disturbing communal harmony and spreading rumours.</h2>\n '''\n h2_itemprop_description = html.find('h2',\n {'itemprop': 'description'})\n description_list.append(\n self.text_cleaning(h2_itemprop_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.business-standard.com/article/current-affairs/death-of-galaxy-galactic-collision-spews-gases-equal-to-10-000-suns-a-year-121011300543_1.html\n <h2 class=\"alternativeHeadline\">The merging galaxy formed 4.5 billion years ago is dubbed ID2299 and is ejecting gases equivalent to 10,000 Suns-worth of gas a year</h2>\n * https://www.business-standard.com/article/international/wb-economist-china-will-need-to-learn-to-restructure-emerging-market-debt-121011300034_1.html\n <h2 class=\"alternativeHeadline\">Increasing debt distress in emerging markets means that China will need to start restructuring debts in the same way that Paris Club lenders did in past crises, World Bank Chief Economist said</h2>\n '''\n h2_class_alternative_headline = html.find(\n 'h2', {'class': 'alternativeHeadline'})\n description_list.append(\n self.text_cleaning(h2_class_alternative_headline.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.express.co.uk/news/world/1369648/India-news-mystery-illness-coronavirus-covid-Andhra-Pradesh-eluru-disease-cause-ont\n <h3>OFFICIALS in India are reportedly seeking to manage panic in the Indian state of Andhra Pradesh due to a mysterious illness spreading in the district.</h3>\n * https://www.express.co.uk/news/politics/1383306/Brexit-live-latest-brexit-deal-Northern-Ireland-customs-boris-johnson-john-redwood\n <h3>A HUGE new fishing row has erupted between Scottish fishermen anf the UK Government, with BBC News Political Editor Laura Kuenssberg warning: \"This could get messy.\"</h3>\n '''\n h3_description = html.find('h3')\n description_list.append(self.text_cleaning(h3_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.independent.co.uk/arts-entertainment/tv/news/ratched-netflix-trigger-warning-child-abuse-suicide-violence-sarah-paulson-b571405.html\n <h2 class=\"sc-qYhdC bflsCm\"><p>Despite presence of warning over graphic content, fans have called for more</p></h2>\n * https://www.independent.co.uk/arts-entertainment/tv/news/bridgerton-violet-actor-ruth-gemmell-tracy-beaker-b1780757.html\n <h2 class=\"sc-oTcDH eZHAcN\"><p>Gemmell starred in the 2004 CBBC film Tracy Beaker: The Movie of Me</p></h2>\n '''\n header_id_articleheader = html.find('header',\n {'id': 'articleHeader'})\n header_two = header_id_articleheader.find('h2')\n description_list.append(self.text_cleaning(header_two.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://scroll.in/article/979318/what-is-the-extent-of-caste-segregation-in-indian-villages-today-new-data-gives-us-an-idea\n <h2>‘The extent of intra-village segregation in Karnataka is greater than the local black-white segregation in the American South.’</h2>\n * https://scroll.in/latest/979410/khichdification-ima-demands-withdrawal-of-move-allowing-ayurveda-doctors-to-perform-surgery\n <h2>The medical body said that the move should not be seen in isolation, referring to other government decisions ‘legitimising Mixopathy’.</h2>\n '''\n 
header = html.find('header')\n description_list.append(\n self.text_cleaning(header.find_next('h2').text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.euronews.com/2020/12/08/charlie-hebdo-trial-prosecutors-request-30-year-sentence-for-fugitive-widow-of-attacker\n <script type=\"application/ld+json\"... '@graph': [\"description\": \"Prosecutors have asked for sentences ranging from 5 years to life imprisonment for the defendants in the Charlie Hebdo trial, including the fugitive widow of one of the attackers.\"...]...>\n * https://www.euronews.com/2020/12/08/france-s-next-aircraft-carrier-to-be-nuclear-powered-macron-confirms\n <script type=\"application/ld+json\"... '@graph': [\"description\": \"France's current flagship warship is to be retired in 2038. It will be replaced by a bigger, nuclear-powered model, Macron said on Tuesday.\"...]...>\n '''\n first_script = html.find('script', {'type': 'application/ld+json'})\n data = json.loads(first_script.string, strict=False)\n description_list.append(\n self.text_cleaning(data['@graph'][0]['description']))\n\n with suppress(Exception):\n scripts = html.find_all('script', {'type': 'application/ld+json'})\n scripts = [script for script in scripts if script is not None]\n for script in scripts:\n with suppress(Exception):\n '''\n Tested on\n * https://www.espncricinfo.com/story/ipl-2020-jofra-archer-thriving-in-different-type-of-pressure-at-ipl-says-rajasthan-royals-team-mate-jos-buttler-1234126\n <script type='application/ld+json'...\"description\":\"Fifty-over cricket must take a back seat in build-up to T20 World Cup, says senior batsman\"...>\n '''\n data = json.loads(script.string, strict=False)\n if isinstance(data, list):\n data = data[0]\n if data[\"@type\"] == \"NewsArticle\" or data[\n \"@type\"] == \"WebPage\":\n if data[\"description\"]:\n description_list.append(\n self.text_cleaning(data[\"description\"]))\n with suppress(Exception):\n data = json.loads(script.string, strict=False)\n if data[\"@type\"] == \"NewsArticle\":\n if isinstance(data[\"video\"], list):\n description_list.append(\n self.text_cleaning(\n data[\"video\"][0][\"description\"]))\n elif not isinstance(data[\"video\"], list):\n description_list.append(\n self.text_cleaning(\n data[\"video\"][\"description\"]))\n description_list = [\n description for description in description_list\n if description != ''\n ]\n if not description_list:\n return \" \"\n best_description = max(sorted(set(description_list)),\n key=description_list.count)\n return best_description",
"def curriculum_goal_list(self, curriculum_name):\n ret = []\n try:\n self.db_cursor.execute(\n \"\"\"SELECT id, description FROM Goal WHERE curriculum_name = %s\"\"\",\n (curriculum_name,))\n goals = self.db_cursor.fetchall()\n\n if goals:\n go = Goal()\n go.curriculum_name = curriculum_name\n for g in goals:\n go.description = g[1]\n go.id = g[0]\n ret.append(go)\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve curriculum goal list: \" + str(curriculum_name))\n\n return ret",
"def get_description(obj):\n if not isinstance(obj.data, dict):\n return \"No description found.\"\n abstract = \"\"\n authors = []\n categories = []\n final_identifiers = []\n\n # Get identifiers\n dois = get_value(obj.data, \"dois.value\", [])\n if dois:\n final_identifiers.extend(dois)\n\n system_no = get_value(obj.data, \"external_system_numbers.value\", [])\n if system_no:\n final_identifiers.extend(system_no)\n\n # Get subject categories, adding main one first. Order matters here.\n record_categories = get_value(obj.data, \"arxiv_eprints.categories\", []) + \\\n get_value(obj.data, \"subject_terms.term\", [])\n for category_list in record_categories:\n if isinstance(category_list, list):\n categories.extend(category_list)\n else:\n categories.append(category_list)\n categories = list(OrderedDict.fromkeys(categories)) # Unique only\n abstract = get_value(obj.data, \"abstracts.value\", [\"\"])[0]\n authors = obj.data.get(\"authors\", [])\n return render_template('inspire_workflows/styles/harvesting_record.html',\n object=obj,\n authors=authors,\n categories=categories,\n abstract=abstract,\n identifiers=final_identifiers)",
"def get_items(data, requisites, formatted):\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata",
"def get_description():\n desc = {\"description\": __doc__}\n sts = utc() - timedelta(hours=26)\n ets = utc() - timedelta(hours=2)\n desc[\"arguments\"] = [\n {\n \"type\": \"datetime\",\n \"name\": \"sts\",\n \"default\": sts.strftime(\"%Y/%m/%d %H00\"),\n \"label\": \"Start Timestamp (UTC):\",\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"datetime\",\n \"name\": \"ets\",\n \"default\": ets.strftime(\"%Y/%m/%d %H00\"),\n \"label\": (\n \"End Timestamp [inclusive] (UTC), \"\n \"interval must be less than 4 days\"\n ),\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT,\n \"default\": \"min\",\n \"name\": \"w\",\n \"label\": \"Which statistic to compute\",\n },\n {\n \"type\": \"csector\",\n \"name\": \"csector\",\n \"default\": \"IA\",\n \"label\": \"Select state/sector\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT2,\n \"default\": \"user\",\n \"label\": \"Plotting mode (user defined color-ramp or freezing)\",\n \"name\": \"mode\",\n },\n {\n \"type\": \"cmap\",\n \"name\": \"cmap\",\n \"default\": \"gnuplot2\",\n \"label\": \"Color Ramp:\",\n },\n ]\n return desc",
"def describe(self) -> str:",
"def get_goal(self, new_goal):\n\n GOAL = \"\"\"SELECT COUNT(*) FROM Section WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\n \"\"\"SELECT description FROM Goal WHERE id = %s AND curriculum_name = %s\"\"\",\n (new_goal.id, new_goal.curriculum_name,))\n c = self.db_cursor.fetchall()\n ret = Goal()\n if c:\n ret.description = c[0][0]\n ret.id = new_goal.id\n ret.curriculum_name = new_goal.curriculum_name\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve goal: \" + str(new_goal.id))\n\n return ret"
]
| [
"0.6394918",
"0.6280152",
"0.6215949",
"0.61783165",
"0.6024815",
"0.59987986",
"0.5982599",
"0.5982384",
"0.5935226",
"0.5835749",
"0.5835749",
"0.58047986",
"0.58000565",
"0.5788512",
"0.57825905",
"0.5774149",
"0.5764935",
"0.5763493",
"0.57389426",
"0.5721243",
"0.56860745",
"0.5684388",
"0.56439656",
"0.5643887",
"0.56381106",
"0.56317186",
"0.5627773",
"0.5626441",
"0.5615047",
"0.56063366"
]
| 0.62975013 | 1 |
Function to retrieve goal from the db | def get_goal(self, new_goal):
GOAL = """SELECT COUNT(*) FROM Section WHERE id = %s"""
ret = None
try:
self.db_cursor.execute(
"""SELECT description FROM Goal WHERE id = %s AND curriculum_name = %s""",
(new_goal.id, new_goal.curriculum_name,))
c = self.db_cursor.fetchall()
ret = Goal()
if c:
ret.description = c[0][0]
ret.id = new_goal.id
ret.curriculum_name = new_goal.curriculum_name
else:
ret = None
except:
logging.warning("DBAdapter: Error- cannot retrieve goal: " + str(new_goal.id))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_goal(self):\n pass",
"def retrieve_from_db(self):\n pass",
"def query(self):",
"def get_test_goal(context, **kw):\n obj_cls = objects.Goal\n db_data = db_utils.get_test_goal(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)",
"def test_get_goals(self):\n pass",
"def get_goal(self):\n self._pid_lock.acquire() # Acquire Lock\n rtn = self._goal\n self._pid_lock.release() # Release Lock\n\n return rtn",
"def query3() :",
"def get_goal(self):\n return self.get_observation(self.env._get_goal())",
"def get_all_goals(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT * FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn self._convert_tup_list_to_dict_list(tup_list)",
"def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)",
"def test_deleting_goal(self):\n\n delete_goal(1)\n self.assertIsNone(Goal.query.get(1))",
"def fetch_goal_context_description(self, goalId):\n GET_ALL_CONTEXTS = \"\"\"SELECT curriculum_name, description FROM Goal WHERE id = %s \"\"\"\n\n try:\n self.db_cursor.execute(GET_ALL_CONTEXTS,(goalId,))\n descriptions = self.db_cursor.fetchall()\n if len(descriptions) <= 0:\n return None\n\n ret_str = []\n ret_str.append(f\"Goal #{goalId}\\n\")\n ret_str.append(f\"From {descriptions[0][0]}: {descriptions[0][1]}\")\n if len(descriptions) > 1:\n ret_str.append(\" (and more)\")\n return ''.join(ret_str)\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve goal contexts from id: \" + str(goalId))\n return None",
"def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)",
"def get(self, story_id):",
"def query(self):\n pass",
"def get():\n id_num = int(input('Enter the ID number of the item you wish to retrieve\\n'))\n db_actions.retrieve(id_num)",
"def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()",
"def goal(self):\n return self._build_goal",
"def _get_db(self):\n gt_db = ...\n return gt_db",
"def goal(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"goal\")",
"def get(self, request):\n return self.execute_query()",
"def get_goal(self) -> GoalType:\n return self.goal",
"def retrieve(self):\n self.DB.close_connection()\n self.r1.queryCase = self.case\n self.r1.knn(1)",
"def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]",
"def goal_info(self):\n return self._goal_info_cache",
"def curriculum_goal_list(self, curriculum_name):\n ret = []\n try:\n self.db_cursor.execute(\n \"\"\"SELECT id, description FROM Goal WHERE curriculum_name = %s\"\"\",\n (curriculum_name,))\n goals = self.db_cursor.fetchall()\n\n if goals:\n go = Goal()\n go.curriculum_name = curriculum_name\n for g in goals:\n go.description = g[1]\n go.id = g[0]\n ret.append(go)\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve curriculum goal list: \" + str(curriculum_name))\n\n return ret",
"def get(self, **args ):\n # Make sure its a valid argument\n for key in args.keys():\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n query = STD.select('*')\n query = query.where( args )\n item = query.list()\n\n # If a list return make sure there is only one item\n if isinstance(item, collections.Iterable):\n if len(item) > 1:\n raise NotUnique(\"More than one items found\")\n if len(item) == 0:\n print \"No items found\"\n return None\n else:\n item = item[0]\n return item",
"def download_all_ground_truths(request):\n\n json_resp = {}\n json_resp['ground_truth'] = []\n cursor = connection.cursor()\n mode = request.GET.get('gt_mode',None)\n if mode is None:\n human = NameSpace.objects.get(ns_id = 'Human')\n robot = NameSpace.objects.get(ns_id = 'Robot')\n gt_human = GroundTruthLogFile.objects.filter(ns_id = human)\n agent = User.objects.get(ns_id = robot,username = 'Robot_user')\n gt_robot = GroundTruthLogFile.objects.filter(ns_id = robot,username = agent)\n for el in gt_human:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n for el in gt_robot:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n cursor.execute(\"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",['Robot','Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n elif mode.lower() == 'automatic':\n cursor.execute(\n \"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",\n ['Robot', 'Robot_user'])\n\n #CAMBIO\n # cursor.execute(\n # \"SELECT g.gt_json FROM ground_truth_log_file AS g INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.gt_type = gg.gt_type AND g.id_report = gg.id_report AND g.ns_id = gg.ns_id WHERE g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time\",\n # ['Robot', 'Robot_user', 'Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n return JsonResponse(json_resp)",
"def goal(self, goal_id):\r\n return Goal(self, goal_id)",
"def test_creating_goal_dict(self):\n\n goal = Goal.query.get(1)\n goal_dict = create_goal_dict(\"goal\", goal)\n\n self.assertIsInstance(goal_dict, dict)\n self.assertEqual(goal_dict[\"goal\"][\"goal_body\"], goal.goal_body)"
]
| [
"0.6538818",
"0.6132327",
"0.60861653",
"0.6083641",
"0.5853879",
"0.57920897",
"0.5776673",
"0.57731026",
"0.57363534",
"0.5716141",
"0.56374633",
"0.5637404",
"0.5626346",
"0.56118083",
"0.5597107",
"0.5588083",
"0.5571267",
"0.5556646",
"0.5503777",
"0.5475083",
"0.54522455",
"0.545156",
"0.5438103",
"0.5417973",
"0.5392787",
"0.5390007",
"0.537297",
"0.5348159",
"0.53469497",
"0.5311212"
]
| 0.66596687 | 0 |
Function to write course goal to the db | def set_course_goal(self, goal_id, course_name):
self.db_cursor.execute(
"""INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)""",
(course_name, goal_id))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_course(self, course: Course) -> None:\n self.collection.insert_one(course.dict())",
"def update_db():\n \n with open(\"courses_2016.json\") as data:\n data = data.read()\n\n courses = json.loads(data)\n\n for course in courses:\n try:\n [dept, course] = course.split(\" \")\n text = get_course(dept, course)\n insert_course(dept, course, text)\n except:\n failures.append(course)",
"def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)",
"def save_db(self) -> None:",
"def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )",
"def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()",
"def saveProgrammingValuesToDatabase(self):\n #print(\"save programming values\")\n #print(\"username: \", self.user.username)\n #self.user.data.printData()\n self.dbManager.setUserProgramData(self.user.username, self.user.data)",
"def save(self):\n self.db.commit()",
"def insert_goal(self, goal_dict):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"INSERT INTO goals VALUES(\r\n\t\t\t\t:task, :goal_time\r\n\t\t\t\t)\"\"\", goal_dict)",
"def save_course(self):\n\t\tprint(\"Course sauvegardee\")\n\t\tprint(self.Course)\n\n\t\tprint(\"self.var_nom : \"+self.var_nom.get())\n\t\tself.Course.name=self.var_nom.get()\n\t\tprint(\"self.vqr_ete : \"+str(self.var_ete.get()))\n\t\tif(self.var_ete.get()==1):\n\t\t\tself.Course.season = \"Seulement ete\"\n\t\telif(self.var_hiver.get()==1):\n\t\t\tself.Course.season = \"Seulement hiver\"\n\t\telse:\n\t\t\tself.Course.season = \"Toutes\"\n\n\n\t\tif self.var_OK_invites.get() == 1:\n\t\t\tself.Course.OK_for_invitee = True\n\n\t\tif self.var_preparer_la_veille.get() == 1:\n\t\t\tself.Course.prepare_day1 = True\n\n\t\tif self.var_legume.get() == 1:\n\t\t\tself.Course.type_course = \"Legume\"\n\t\telif self.var_viande.get() == 1:\n\t\t\tself.Course.type_course = \"Viande\"\n\t\telif self.var_poisson.get() == 1:\n\t\t\tself.Course.type_course = \"Poisson\"\n\t\telif self.var_puree.get() == 1:\n\t\t\tself.Course.type_course = \"Puree\"\n\t\telif self.var_soupe.get() == 1:\n\t\t\tself.Course.type_course = \"Soupe\"\n\t\telif self.var_salade.get() == 1:\n\t\t\tself.Course.type_course = \"Salade\"\n\t\telif self.var_autre .get() == 1:\n\t\t\tself.Course.type_course = \"Autres\"\n\t\telse:\t\n\t\t\tself.Course.type_course = \"Autres\"\n\t\t\n\n\t\tself.Course.recipe = self.text_recipe.get(\"1.0\",END)\n\t\tself.Course.link = self.text_link.get(\"1.0\",END)\n\t\tprint(self.Course)\n\t\t\n\t\tself.getListOfRecette()\n\t\tself.list_course.append(self.Course)\n\t\tself.saveListOfRecette()\n\t\t#on quitte la fenetreTopLevel\t\n\t\tself.parentFrame.destroy()",
"def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")",
"def save(self, db):\n pass",
"def save(self):\n self.__db.commit()",
"def save(database, resource):\n # TODO\n pass",
"def commit(self):",
"def Save(self) -> None:\n self.__conn.commit()",
"def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()",
"def save(self):\n self.add_statements(self.triples())",
"def write_res(res):\n if res is None:\n return None\n db = Database('requsts_res.sqlite3')\n curs = db.connect()\n try:\n curs.execute('''INSERT INTO content_new (url, binary_content) VALUES (?,?)''', (res.url, res._content,))\n db.conn.commit()\n except sqlite3.ProgrammingError as e:\n print('Database Error')\n print(e)\n raise e",
"def commit():\n get_db().commit()",
"def save():",
"def save(self, *args):\n self.party_name, self.office_name, self.user_id, self.date_created, self.status = args\n format_str = f\"\"\"\n INSERT INTO public.applications (party_name,office_name,user_id,date_created,status)\n VALUES ('{args[0]}','{args[1]}','{args[2]}','{(datetime.now())}','pending');\n \"\"\"\n cursor.execute(format_str)",
"def update_course(self):\n # ensure that updating course is exists\n if self.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n\n # ensuring that user does not provided less number of limited places\n if db[\"courses\"][crs_i][\"total_place\"] > self._total_place:\n print(\"{} course's limited places number must be more than {}\".format(\n self._course_name,\n db[\"courses\"][crs_i][\"total_place\"]\n ))\n return\n\n db[\"courses\"][crs_i][\"teacher\"] = self._teacher\n db[\"courses\"][crs_i][\"total_place\"] = self._total_place\n break\n self._file.write_db(db)\n print(\"The course - {} is updated\".format(self._course_name))\n return self.get_course().course_info()",
"def write_to_db(batch_num,pH,cursor):\n\n\t#Batch Number\n\tbatch = 1\n\t\t\n\n\tcur_time = datetime.now()\n\theater_state = 1\n\ttemp = 30.1\n\tamb_hum, amb_temp = 25.1,30.2\n\n\tcursor.execute('''INSERT INTO Kombucha_Data('Time', 'Batch', 'Heater State', 'Temperature', 'Ambient Temperature', 'Ambient Humidity','pH')\n\t\t\t\t VALUES(?,?,?,?,?,?,NULL)''', (cur_time, batch_num, heater_state, temp, amb_temp, amb_hum))\n\tcursor.commit()\n\treturn",
"def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='O'\n )",
"def write_to_db( self, *args ):\n try:\n toSave = [ a for a in args ]\n # save them\n self.session.add_all( toSave )\n self.session.commit()\n self._fire_save_notification()\n return True\n except Exception as e:\n print( \"Error : %s\" % e )\n self._fire_error_saving_notification( e )\n return False",
"def save_query(self):\r\n self.conn.commit()",
"def write_to_database(day, data_to_write):\n\n \"\"\" PSQL commands to create the database:\n # CREATE TABLE phonedata (id serial primary key, date DATE, data JSONB);\n # GRANT ALL ON phonedata TO user;\n # GRANT ALL ON SEQUENCE phonedata_id_seq TO user;\n \"\"\"\n\n # Parameters used to connect to the database \n params = {\"host\":\"localhost\",\n \"port\":\"5432\",\n \"database\":\"postgres\", \n \"user\":\"###\", \n \"password\":\"###\"}\n\n # Connect to the database with the parameters above\n conn = psycopg2.connect(**params)\n print(\"PostgreSQL connection is open\")\n\n # enable json handling\n psycopg2.extras.register_json(conn)\n\n # Connect the cursor\n cursor = conn.cursor()\n \n # INSERT the Google Analytics data with date into the database\n print(\"INSERTING into the database now\")\n\n query = \"INSERT INTO phonedata ( date, data ) VALUES ( (CURRENT_DATE - %s)::DATE, %s)\"\n\n cursor.execute(query, (day, psycopg2.extras.Json(data_to_write)))\n \n # Make the changes to the database persistent\n conn.commit()\n\n # Close the connection with the database\n cursor.close()\n conn.close()\n print(\"PostgreSQL connection is closed\")",
"def add_project(title, description, max_grade):\n\n QUERY = \"\"\"\n INSERT INTO Projects (title, description, max_grade) VALUES (?, ?, ?)\n \"\"\"\n\n db_cursor.execute(QUERY, (title, description, max_grade))\n db_connection.commit()\n\n print \"Successfully added %s: %s with a max grade of %s\" % (title, description, max_grade)",
"def commitToDatabase(self, tiltseriesdata):\n\t\tapDisplay.printError(\"you did not create a 'commitToDatabase' function in your script\")\n\t\traise NotImplementedError()"
]
| [
"0.6199046",
"0.60579056",
"0.60428387",
"0.59549844",
"0.59167856",
"0.5892669",
"0.5788649",
"0.56852055",
"0.5645917",
"0.56355464",
"0.56051177",
"0.56049925",
"0.55996007",
"0.55829275",
"0.55780876",
"0.5575448",
"0.5556891",
"0.5539692",
"0.55377734",
"0.54941165",
"0.5482023",
"0.54685867",
"0.5454029",
"0.54384595",
"0.5437475",
"0.54150254",
"0.5409324",
"0.5408894",
"0.5402033",
"0.539028"
]
| 0.6796742 | 0 |
Function to write course topic to the db | def set_course_topic(self, topic_id,course_name):
self.db_cursor.execute(
"""INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)""",
(course_name, topic_id))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_course_topics(user, token, course):\n\n # save topics' data\n GOOGLE_API_TOPICS = 'https://classroom.googleapis.com/v1/courses/%s/topics' % course.id\n headers = {\n 'content-type': 'application/json',\n }\n\n response = requests.get(\n GOOGLE_API_TOPICS,\n params={'access_token': token},\n headers=headers)\n\n topics = response.json()['topic']\n for topic in topics:\n Topic.objects.create(\n id=topic['topicId'],\n course=course,\n name=topic['name'].title(),\n updated_at=topic['updateTime']\n )",
"def write_topics(con, cur, beta_file, vocab):\n cur.execute('CREATE TABLE topics (id INTEGER PRIMARY KEY, title VARCHAR(100))')\n con.commit()\n\n #NOTE: What is the following line for and why doesn't it raise an error?\n topics_file = open(filename, 'a')\n\n for topic in open(beta_file, 'r'):\n topic = map(float, topic.split())\n index = argsort(topic)[::-1] # reverse argsort\n ins = 'INSERT INTO topics (id, title) VALUES(NULL, ?)'\n buf = \"{%s, %s, %s}\" % (vocab[index[0]],\n vocab[index[1]],\n vocab[index[2]])\n cur.execute(ins, [buffer(buf)])\n\n con.commit()",
"def insert_topic(self,text,addition,year,user):\r\n topic = Topic(date=date.today(),text=text,year=year,user=user)\r\n topic.addition = addition\r\n \r\n session = self.persistence.get_session()\r\n session.add(topic)\r\n session.commit()",
"def add_new_topic_to_db(self, topic_obj):\n # todo: don't need this anymore\n self.db_cursor.execute(\"\"\"INSERT INTO Topic (id, name) VALUES (%s, %s)\"\"\", (topic_obj.id, topic_obj.name))\n self.db_connection.commit()",
"def set_topic(event_id, topic):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Event SET Topic='\"+topic+\"' WHERE eventID=\"+str(event_id)\n cursor.execute(sql_string)\n connection.commit()",
"def test_update_topic_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def test_create_new_discussion_topic_courses(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def set_topic(self, new_topic, updating=False):\n\n TOPIC_QUERY = \"\"\"UPDATE Topic SET name = %s WHERE id = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Topic (name, id) VALUES (%s, %s)\"\"\"\n\n\n\n self.db_cursor.execute(TOPIC_QUERY, (new_topic.name, new_topic.id))\n self.db_connection.commit()",
"def test_topic_save(forum, user):\n post = Post(content=\"Test Content\")\n topic = Topic(title=\"Test Title\")\n\n assert forum.last_post_id is None\n assert forum.post_count == 0\n assert forum.topic_count == 0\n\n topic.save(forum=forum, post=post, user=user)\n\n assert topic.title == \"Test Title\"\n\n topic.title = \"Test Edit Title\"\n topic.save()\n\n assert topic.title == \"Test Edit Title\"\n\n # The first post in the topic is also the last post\n assert topic.first_post_id == post.id\n assert topic.last_post_id == post.id\n\n assert forum.last_post_id == post.id\n assert forum.post_count == 1\n assert forum.topic_count == 1",
"def save_course(self, course: Course) -> None:\n self.collection.insert_one(course.dict())",
"def save_courses(user, token):\n\n GOOGLE_API_COURSES = 'https://classroom.googleapis.com/v1/courses/'\n headers = {\n 'content-type': 'application/json',\n }\n\n response = requests.get(\n GOOGLE_API_COURSES,\n params={'access_token': token},\n headers=headers)\n\n courses = response.json()['courses']\n\n Course.objects.filter(teacher=user).delete()\n\n for course in courses:\n saved_course = Course.objects.create(\n id=course['id'],\n teacher=user,\n name=course['name'].title(),\n section=course['section'].title(),\n state=course['courseState'],\n link=course['alternateLink'],\n teachers_email=course['teacherGroupEmail'],\n course_email=course['courseGroupEmail'],\n created_at=course['creationTime'],\n updated_at=course['updateTime']\n )\n save_course_topics(user, token, saved_course)",
"def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)",
"def test_subscribe_to_topic_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def store(self, topic_id, start_date, end_date, date_axis, count_axis, parties_proportions):\n document = {'topic_id': topic_id,\n 'start_date': start_date,\n 'end_date': end_date,\n 'date_axis': date_axis,\n 'count_axis': count_axis,\n 'parties_proportions': parties_proportions}\n self.insert(document)",
"def add_teacher_data(connection,name,tsc_no,subjects,type_of_teacher):\r\n with connection:\r\n connection.execute(INSERT_TEACHER,(name,tsc_no,subjects,type_of_teacher))",
"def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()",
"def create_topic():\n nodes = Node.query.all()\n form = TopicForm(nodes)\n if request.method == 'POST':\n topic = Topic(title=request.form.get('title'),\n content=request.form.get('content'),\n node_id=request.form.get('node_id'),\n user=current_user._get_current_object())\n db.session.add(topic)\n db.session.commit()\n return jsonify({\"result\": 'ok'})\n\n return render_template('main/create_topic.html', nodes=nodes, form=form)",
"def save(self, **kwargs):\n\t\tif self.pk:\n\t\t\tself.topic_modification_date = datetime.now()\n\t\tsuper(Topic, self).save(**kwargs)",
"def create_chatroom(request, topic_id):\n topic = get_object_or_404(Topic, pk=topic_id)\n\n session = Session()\n session.topic = topic\n session.junior = request.user\n session.save()\n\n return redirect('articles:chatroom', id=session.id)",
"def test_mark_topic_as_read_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def set_topic(self,topic_path,topic_text):\n params = {'username': self.settings['auth']['username'],\n 'password': self.settings['auth']['password']}\n\n # -- Grab the crypttoken by editing the page but doing nothing.\n twiki_cgi = \"{:s}/bin/edit/{:s}\".format(self.url,topic_path)\n response = self.session.get(twiki_cgi,params=params)\n\n # -- Parse the HTML to get the crypttoken value.\n soup = BeautifulSoup(response.text, 'html.parser')\n crypttoken = soup.find(attrs={\"name\": \"crypttoken\"})['value']\n params['crypttoken'] = crypttoken\n\n twiki_cgi = \"{:s}/bin/save/{:s}\".format(self.url,topic_path)\n data = {'username': self.settings['auth']['username'],\n 'password': self.settings['auth']['password'],\n 'text': topic_text,\n 'crypttoken': crypttoken}\n response = self.session.post(twiki_cgi, data=data)\n\n return response",
"def edit_topic():\n topic = db.topic(request.args(0))\n form = SQLFORM(db.topic, record=topic)\n form.vars.description = text_store_read(topic.description)\n if form.validate():\n topic.update_record(\n name=form.vars.name,\n )\n text_store_write(form.vars.description, key=topic.description)\n session.flash = T('The topic has been created')\n redirect(URL('default', 'index'))\n return dict(form=form)",
"def dump_topic(dump_args):\n topic_records = db[dump_args.topic].find({}) # '{}' pulls all the records\n records = [record for record in topic_records]\n with open(os.path.join(os.getcwd(), dump_args.topic), 'w') as f:\n dump(loads(dumps(records)), f, indent=4)",
"def remove_course_topics(self, course):\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()",
"def save(self, data):\n\n tags = '{'\n\n for tag in data['tags']:\n tags += '\"' + tag + '\",'\n\n tags = tags[:-1] + '}'\n\n query = \"INSERT INTO {} (topic, description, tags, location, happening_on)\\\n VALUES ('{}', '{}', '{}', '{}', '{}') RETURNING *\".format(\n self.table, data['topic'], data['description'], tags,\n data['location'], data['happening_on']\n )\n\n return self.insert(query)",
"def test_delete_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_courses(topic_id, course_id)",
"def associate_topic(self, topic):\n new_topics_courses = TopicsCourses()\n new_topics_courses.topic = topic\n self.topics_courses.append(new_topics_courses)\n return new_topics_courses",
"def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds):\n\n model = topic_models.TopicRightsModel.get(topic_rights.id, strict=False)\n\n model.manager_ids = topic_rights.manager_ids\n\n model.commit(committer_id, commit_message, commit_cmds)",
"def new_topic(request):\n form = Form(request, schema=TopicSchema)\n if form.validate():\n topic = form.data[\"title\"]\n author = form.data[\"author\"]\n desc = form.data[\"description\"]\n date = datetime.datetime.now()\n url = slugfy(topic)\n topic_tuple = {\n \"title\": topic,\n \"url\": url,\n \"author\": author,\n \"description\": desc,\n \"topic_date\": date.strftime(\"%d/%m/%Y\"),\n }\n request.db[\"topic\"].insert(topic_tuple)\n return HTTPFound(location=\"/\")\n\n return render_to_response(\n \"templates/new_topic.html\",\n {\"form\": FormRenderer(form), \"count\": count(request)},\n request=request,\n )",
"def insert_course_enrollment(self, student_id, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n INSERT INTO course_enrollments ('student_id', 'course_id', 'course_section_id') VALUES\n (?,?,?)\"\"\",\n (student_id, course_id, course_section_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1"
]
| [
"0.72414744",
"0.65759945",
"0.64926547",
"0.6254328",
"0.6082379",
"0.59410554",
"0.5932748",
"0.5922048",
"0.58537835",
"0.58148694",
"0.5754014",
"0.5738365",
"0.5724344",
"0.5625807",
"0.56163025",
"0.56147015",
"0.56041497",
"0.55994385",
"0.5586847",
"0.5568423",
"0.5561985",
"0.55512285",
"0.5513653",
"0.550855",
"0.54624045",
"0.54557467",
"0.5439155",
"0.542767",
"0.5418823",
"0.5417976"
]
| 0.7521758 | 0 |
Function to get a course topic from the db | def get_course_topic(self, topic_id, course_name):
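        # Returns [course_name, topic_id] if the pairing exists in CourseTopics, otherwise None.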
ret = None
try:
self.db_cursor.execute(
"""SELECT course_name, topic_id FROM CourseTopics WHERE course_name = %s AND topic_id = %s""",
(course_name, topic_id))
ct = self.db_cursor.fetchall()
if ct:
cname = ct[0][0]
ctopic = ct[0][1]
ret = [cname, ctopic]
else:
ret = None
except:
logging.warning("DBAdapter: Error- cannot retrieve course topic: " + str(id))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_single_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_courses(topic_id, course_id)",
"def get_topic(self, id):\n TOPIC = \"\"\"SELECT COUNT(*) FROM Topic WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Topic WHERE id = %s\"\"\", (id,))\n t = self.db_cursor.fetchall()\n ret = Topic()\n ret.name = t[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve topic with id \" + str(id))\n return None\n\n return ret",
"def test_get_full_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_courses(topic_id, course_id)",
"def get_topic(title):\n return Topic.get(Topic.title == title)",
"def select_course(self, subject, course_num):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT * FROM courses WHERE (subject=? AND course_num=?)\",\n (subject, course_num),\n )\n return cursor.fetchone()",
"def get_topic(self,topic_path):\n twiki_cgi = \"{:s}/bin/view/{:s}\".format(self.url,topic_path)\n\n params = {'username': self.settings['auth']['username'],\n 'password': self.settings['auth']['password'],\n 'raw': 'text'}\n response = self.session.get(twiki_cgi, params=params)\n\n return response",
"def get_curriculum_topic(self, curriculum_name, curriculum_topic):\n ret = None\n try:\n self.db_cursor.execute(\n \"\"\"SELECT level, subject_area, time_unit FROM CurriculumTopics WHERE curriculum_name = %s AND topic_id = %s\"\"\",\n (curriculum_name, curriculum_topic))\n ct = self.db_cursor.fetchall()\n if ct:\n ret = CurriculumTopic\n level = ct[0][0]\n subject_area = ct[0][1]\n time_unit = ct[0][2]\n ret.curriculum_name = curriculum_name\n ret.topic_id = curriculum_topic\n ret.time_unit = time_unit\n ret.subject_area = subject_area\n ret.level = level\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve curriculum topic: \" + str(id))\n\n return ret",
"def topic(request, topic_id):\n\ttopic = Topic.objects.get(id=topic_id)\n\tvocabs = topic.vocab_set.all()\n\tcontext = {'topic': topic, 'vocabs':vocabs}\n\treturn render(request, 'Toeic/topic.html', context)",
"def get_topics(category):\n page = requests.get(BASE_URL, verify=False)\n soup = BeautifulSoup(page.content)\n output = []\n get_lesson_id = lambda url: url.split('=')[-1]\n\n if category == 'Top 10 Courses':\n playlist = soup.find(id='featured_playlists')\n for item in playlist.findAll('div', 'item'):\n link = item.find('a', 'featured-playlist-title')\n output.append({\n 'thumbnail': item.find('img').get('src'),\n 'title': link.text.replace(' ', '').strip(),\n 'lesson_id': get_lesson_id(link['href'])})\n else:\n sidebar = soup.find(id='main_aside')\n for dl in sidebar.findAll('dl'):\n if dl.find('h4').text == category:\n for item in dl.findAll('dd'):\n link = item.find('a', 'category-name')\n output.append({\n 'title': link.getText(' '),\n 'lesson_id': get_lesson_id(link['href'])})\n\n return output",
"def GetTopic(self, topic_id):\n return self._TopicSearchHelper(self.topics, topic_id)",
"def get_class_topic(class_id):\n topic_data = query_db(\"SELECT id, name FROM topics WHERE class_id=?\", [class_id])\n topics = []\n for topic in topic_data:\n topic_dict_class = {}\n topic_dict_class[\"id\"] = topic[0]\n topic_dict_class[\"name\"] = topic[1]\n topics.append(topic_dict_class)\n return topics",
"def get(cls, topic_info):\n try: #to treat topic info as topic.id\n return Topic.query.get(int(topic_info))\n except Exception: #treat topic info as topic.name\n return Topic.query.filter_by(name=topic_info).first()",
"def test_list_topic_entries_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_topic_entries_courses(topic_id, course_id)",
"def get_topic(topic_id):\n topic = db_session.query(Topic).filter_by(id=topic_id).one()\n return jsonify(topic.serialize)",
"def get_topic_of_question(question):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n topic_id = question.get(\"TopicId\")\n # query topic_id of the question\n try:\n response = topic_table.get_item(Key={\"TopicId\": topic_id})\n topic = response[\"Item\"]\n except:\n print(\"No topic found, returning None..\")\n return None\n return topic",
"def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)",
"def get_by_topic(cls, topic):\n\t\treturn cls.get_by_key_name(get_hash_key_name(topic))",
"def topicnews(topic):\n urlnews=urltop\n url=urlnews+topic\n urlapi=url+'&'+'apiKey='\n urlcoun=urlapi+apikey\n response=requests.get(urlcoun)\n data=response.json()\n return data",
"def topic(self, topic_id):\r\n return contents.Topic(self, topic_id)",
"def get_teacher_topic_all():\n topic_data = query_db(\n \"SELECT topics.id, topics.name, classes.name FROM topics JOIN classes \"\n \"ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n topics = []\n for topic in topic_data:\n topic_dict_teacher = {}\n topic_dict_teacher[\"id\"] = topic[0]\n topic_dict_teacher[\"name\"] = flask.escape(str(topic[1]))\n topic_dict_teacher[\"class\"] = flask.escape(str(topic[2]))\n topics.append(topic_dict_teacher)\n return topics",
"def get_topic(self):\n return self.topic",
"def topic(request, topic_id):\n posts = Post.objects.filter(topic=topic_id).order_by(\"created\")\n posts = mk_paginator(request, posts, DJANGO_SIMPLE_FORUM_REPLIES_PER_PAGE)\n topic = Topic.objects.get(pk=topic_id)\n return render_to_response(\"forum/topic.html\", add_csrf(request, posts=posts, pk=topic_id,\n topic=topic), context_instance=RequestContext(request))",
"def topic(self, topic_id):\r\n return topics.Topic(self, topic_id)",
"def get_course(self, name):\n GET_TOPIC_IDS = \"\"\"SELECT topic_id FROM CourseTopics WHERE course_name = %s\"\"\"\n GET_GOAL_IDS = \"\"\"SELECT goal_id FROM CourseGoals WHERE course_name = %s\"\"\"\n\n ret = None\n\n try:\n self.db_cursor.execute(\"\"\"SELECT subject_code, credit_hours, description FROM Course WHERE name = %s\"\"\", (name,))\n c = self.db_cursor.fetchall()\n ret = Course()\n ret.subject_code = c[0][0]\n ret.credit_hours = c[0][1]\n ret.description = c[0][2]\n ret.name = name\n ret.goals = []\n ret.topics = []\n\n\n self.db_cursor.execute(GET_TOPIC_IDS, (name,))\n t_ids = self.db_cursor.fetchall()\n for id in t_ids:\n ret.topics.append(id[0])\n\n self.db_cursor.execute(GET_GOAL_IDS, (name,))\n g_ids = self.db_cursor.fetchall()\n for id in g_ids:\n ret.goals.append(id[0])\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve course: \" + str(name))\n return None\n\n return ret",
"def select_course_detail(self, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_id = ? AND cs.course_section_id = ?\"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()",
"def get_topic_quiz(topic_id):\n quiz_data = query_db(\"SELECT id, name FROM quizzes WHERE topic_id=?;\", [topic_id])\n quizzes = []\n for quiz in quiz_data:\n quiz_topic = {}\n quiz_topic[\"id\"] = quiz[0]\n quiz_topic[\"name\"] = quiz[1]\n quizzes.append(quiz_topic)\n return quizzes",
"def schools_by_topic(mongo_collection, topic):\n return mongo_collection.find({\"topics\": {\"$in\": [topic]}})",
"def test_list_discussion_topics_courses(self):\r\n course_id = None # Change me!!\r\n\r\n r = self.client.list_discussion_topics_courses(course_id, exclude_context_module_locked_topics=None, include=None, only_announcements=None, order_by=None, scope=None, search_term=None)",
"def get_course_by_id(course_id):\n course = Courses.query. \\\n filter_by(id=course_id). \\\n first_or_404()\n\n return course",
"def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions"
]
| [
"0.7381407",
"0.7054905",
"0.68542206",
"0.67677593",
"0.6664088",
"0.66496855",
"0.6643712",
"0.6638423",
"0.655389",
"0.6544459",
"0.6510627",
"0.6470915",
"0.6435634",
"0.6431833",
"0.64089453",
"0.6306468",
"0.6267252",
"0.62563705",
"0.62313986",
"0.62202626",
"0.6210956",
"0.61769366",
"0.6172284",
"0.6124808",
"0.6115916",
"0.6108442",
"0.6096235",
"0.60934746",
"0.6066487",
"0.6065957"
]
| 0.7823914 | 0 |
Function to retrieve curriculum topic from the db | def get_curriculum_topic(self, curriculum_name, curriculum_topic):
ret = None
try:
self.db_cursor.execute(
"""SELECT level, subject_area, time_unit FROM CurriculumTopics WHERE curriculum_name = %s AND topic_id = %s""",
(curriculum_name, curriculum_topic))
ct = self.db_cursor.fetchall()
if ct:
                ret = CurriculumTopic()
level = ct[0][0]
subject_area = ct[0][1]
time_unit = ct[0][2]
ret.curriculum_name = curriculum_name
ret.topic_id = curriculum_topic
ret.time_unit = time_unit
ret.subject_area = subject_area
ret.level = level
else:
ret = None
except:
logging.warning("DBAdapter: Error- cannot retrieve curriculum topic: " + str(id))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_topic_of_question(question):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n topic_id = question.get(\"TopicId\")\n # query topic_id of the question\n try:\n response = topic_table.get_item(Key={\"TopicId\": topic_id})\n topic = response[\"Item\"]\n except:\n print(\"No topic found, returning None..\")\n return None\n return topic",
"def topic(request, topic_id):\n\ttopic = Topic.objects.get(id=topic_id)\n\tvocabs = topic.vocab_set.all()\n\tcontext = {'topic': topic, 'vocabs':vocabs}\n\treturn render(request, 'Toeic/topic.html', context)",
"def get_topic(topic_id):\n topic = db_session.query(Topic).filter_by(id=topic_id).one()\n return jsonify(topic.serialize)",
"def get_topic(self, id):\n TOPIC = \"\"\"SELECT COUNT(*) FROM Topic WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Topic WHERE id = %s\"\"\", (id,))\n t = self.db_cursor.fetchall()\n ret = Topic()\n ret.name = t[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve topic with id \" + str(id))\n return None\n\n return ret",
"def get_topic(title):\n return Topic.get(Topic.title == title)",
"def get_topic(self,topic_path):\n twiki_cgi = \"{:s}/bin/view/{:s}\".format(self.url,topic_path)\n\n params = {'username': self.settings['auth']['username'],\n 'password': self.settings['auth']['password'],\n 'raw': 'text'}\n response = self.session.get(twiki_cgi, params=params)\n\n return response",
"def get(cls, topic_info):\n try: #to treat topic info as topic.id\n return Topic.query.get(int(topic_info))\n except Exception: #treat topic info as topic.name\n return Topic.query.filter_by(name=topic_info).first()",
"def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)",
"def get_topics():\n topics, _ = base_query(db_session)\n return jsonify([p.serialize for p in topics])",
"def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions",
"def _get_topic_for_response():\n return _get_topic_base() + \"res/\"",
"def topicnews(topic):\n urlnews=urltop\n url=urlnews+topic\n urlapi=url+'&'+'apiKey='\n urlcoun=urlapi+apikey\n response=requests.get(urlcoun)\n data=response.json()\n return data",
"def GetTopic(self, topic_id):\n return self._TopicSearchHelper(self.topics, topic_id)",
"def get_class_topic(class_id):\n topic_data = query_db(\"SELECT id, name FROM topics WHERE class_id=?\", [class_id])\n topics = []\n for topic in topic_data:\n topic_dict_class = {}\n topic_dict_class[\"id\"] = topic[0]\n topic_dict_class[\"name\"] = topic[1]\n topics.append(topic_dict_class)\n return topics",
"def get_topic(self):\n return self.topic",
"def test_single_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topic/Topic 1/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['name'],'Topic 1')\n self.assertEqual(data['description'],'The first topic.')",
"def test_get_single_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_courses(topic_id, course_id)",
"def get_course_topic(self, topic_id, course_name):\n ret = None\n try:\n self.db_cursor.execute(\n \"\"\"SELECT course_name, topic_id FROM CourseTopics WHERE course_name = %s AND topic_id = %s\"\"\",\n (course_name, topic_id))\n ct = self.db_cursor.fetchall()\n if ct:\n cname = ct[0][0]\n ctopic = ct[0][1]\n ret = [cname, ctopic]\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve course topic: \" + str(id))\n\n return ret",
"def topic(request, topic_id):\n posts = Post.objects.filter(topic=topic_id).order_by(\"created\")\n posts = mk_paginator(request, posts, DJANGO_SIMPLE_FORUM_REPLIES_PER_PAGE)\n topic = Topic.objects.get(pk=topic_id)\n return render_to_response(\"forum/topic.html\", add_csrf(request, posts=posts, pk=topic_id,\n topic=topic), context_instance=RequestContext(request))",
"def view_topic(request, topic_slug):\n view_topic = request.db[\"topic\"].find_one({\"url\": topic_slug.matchdict[\"url\"]})\n answers = request.db[\"answer\"].find({\"topic_index\": topic_slug.matchdict[\"url\"]})\n\n return render_to_response(\n \"templates/view_topic.html\",\n {\"view_topic\": view_topic, \"answers\": answers, \"count\": count(request),},\n request=request,\n )",
"def topic(self, topic_id):\r\n return contents.Topic(self, topic_id)",
"def topic():\n\n topic = request.values[\"topic\"]\n topic = topic.replace(\"_\", \" \")\n if request.method == \"GET\":\n query = (\n DB.session.query(Comments)\n .filter(Comments.text.like(\"%\" + topic + \"%\"))\n .limit(2500)\n .all()\n )\n num_results = len(query)\n if num_results > 0:\n sentiment = [float(q.sentiment) for q in query if q.sentiment is not None]\n hist = np.histogram(sentiment, bins=10, range=(-1, 1))\n data = json.dumps(\n [round(int(hist[0][i]) / num_results * 100,1) for i in range(len(hist[0]))]\n )\n sentiment = functools.reduce(lambda x, y: x + y, sentiment) / num_results\n return render_template(\n \"topic_sentiment.html\", sentiment=sentiment, topic=topic, data=data\n )\n else:\n return render_template(\"no_results.html\")",
"def topic(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"topic\")",
"def get_tweets_by_topic(topic, start_date, end_date):\n try:\n query = f\"select tweet, sentence, polarity, subjectivity from {db_schema}.{db_table_tweet} t, {db_schema}.{db_table_pred} tp where t.id_tweet=tp.id_tweet and topic='{topic}' and tweet_date between str_to_date('{start_date}', '%Y-%m-%d') and str_to_date('{end_date}', '%Y-%m-%d')\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweets = cur.fetchall()\n columns = [col[0] for col in cur.description]\n logger.info(f'TOPIC: {topic}, N° TWEETS: {len(tweets)}') \n return tweets, columns\n \n except Exception as ex:\n logger.exception(ex)\n return f'Exception: {ex}'",
"def test_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topics/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['count'],3)\n self.assertTrue({'name': 'Topic 1', 'description': 'The first topic.'} in data['results'])\n self.assertTrue({'name': 'Topic 2', 'description': 'The second topic.'} in data['results'])",
"def get_topics(self):\n try:\n with self.__db_lock:\n sql = \"SELECT * FROM 'topics' ORDER BY 'name' ASC\"\n self.__cursor.execute(sql)\n topics = self.__cursor.fetchall()\n if topics is None or len(topics) == 0:\n return []\n return [topic[1] for topic in topics]\n except Exception as e:\n logging.error(\n \"Exception when trying to get topics list: {}\".format(e))\n return []",
"def get_topic_details(request, contentnode_id):\n # Get nodes and channel\n node = ContentNode.objects.get(pk=contentnode_id)\n try:\n request.user.can_view_node(node)\n except PermissionDenied:\n return HttpResponseNotFound(\"No topic found for {}\".format(contentnode_id))\n data = get_node_details_cached(node)\n return HttpResponse(json.dumps(data))",
"def topic(self, topic_id):\r\n return topics.Topic(self, topic_id)",
"def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics",
"def get_by_topic(cls, topic):\n\t\treturn cls.get_by_key_name(get_hash_key_name(topic))"
]
| [
"0.67491347",
"0.6399607",
"0.6361938",
"0.6343837",
"0.6279825",
"0.62598395",
"0.6189485",
"0.6166207",
"0.6124295",
"0.60379887",
"0.60332173",
"0.60299146",
"0.59698087",
"0.59110564",
"0.59022534",
"0.5839776",
"0.58006823",
"0.57999027",
"0.577406",
"0.5755427",
"0.5752275",
"0.5724634",
"0.5720116",
"0.5692563",
"0.5678401",
"0.56642956",
"0.56611836",
"0.5650108",
"0.5630286",
"0.56254107"
]
| 0.7505171 | 0 |
Function to retrieve a list of curriculum goals from the db | def curriculum_goal_list(self, curriculum_name):
ret = []
try:
self.db_cursor.execute(
"""SELECT id, description FROM Goal WHERE curriculum_name = %s""",
(curriculum_name,))
goals = self.db_cursor.fetchall()
if goals:
                for g in goals:
                    go = Goal()
                    go.curriculum_name = curriculum_name
                    go.id = g[0]
                    go.description = g[1]
                    ret.append(go)
except:
logging.warning("DBAdapter: Error- cannot retrieve curriculum goal list: " + str(curriculum_name))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_goals(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT * FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn self._convert_tup_list_to_dict_list(tup_list)",
"def goals(self):\r\n return goals.Goals(self)",
"def get_goal(self, new_goal):\n\n GOAL = \"\"\"SELECT COUNT(*) FROM Section WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\n \"\"\"SELECT description FROM Goal WHERE id = %s AND curriculum_name = %s\"\"\",\n (new_goal.id, new_goal.curriculum_name,))\n c = self.db_cursor.fetchall()\n ret = Goal()\n if c:\n ret.description = c[0][0]\n ret.id = new_goal.id\n ret.curriculum_name = new_goal.curriculum_name\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve goal: \" + str(new_goal.id))\n\n return ret",
"def get_tournament_list():\n database = TinyDB('db.json')\n tournament_list = database.table('tournaments').all()\n return tournament_list",
"def remove_curriculum_goals(self, curriculum):\n DELETE_CURRICULUM_GOALS = \"\"\"DELETE FROM Goal WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM_GOALS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum goals.\")",
"def get_project_goals(self, date=None):\n\n date = date or self.get_local_date()\n # get day bit from date\n day_bit = 2 ** date.weekday()\n\n # get all active project goals belonging to user,\n qry = Project.query.filter_by(active=True).outerjoin(Goal).filter(\n Project.user == self.id)\n # filter out inactive goals\n qry = qry.filter(Goal.days.op('&')(day_bit) != 0)\n # add Goal.time column to table and use in subquery\n qry = qry.add_column(Goal.time)\n sub = qry.subquery()\n # use db.func.sum to get total time goal for each project\n qry = db.session.query(sub.c.id, sub.c.name,\n db.func.sum(sub.c.time).label('goal'))\n qry = qry.group_by(sub.c.id, sub.c.name).order_by(sub.c.id)\n return qry.all()",
"def get_all_tasks(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT task FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn [tup[0] for tup in tup_list]",
"def test_get_goals(self):\n pass",
"def _list_goals(context, message):\r\n context.log.error(message)\r\n # Execute as if the user had run \"./pants goals\".\r\n return Goal.execute(context, 'goals')",
"async def get_community_goals(\n community_goals_service: CommunityGoalsService = Depends(),\n) -> list[CommunityGoal]:\n return community_goals_service.get_community_goals()",
"def goals(self):\n return self._goals",
"def get_all_labs():\n return Lab.query.all()",
"def GetLearningObjectives(firebase: firebase) -> None:\n\n global learningObjectives\n obj_key_list = []\n\n result = firebase.get('/learningobjective', None)\n\n if result is None:\n return\n\n for i in result.keys():\n obj_key_list.append(i)\n\n for i in obj_key_list:\n lo = LearningObjective()\n lo.setId(i)\n lo.setCourseId(result[i]['courseid'])\n lo.setText(result[i]['content'])\n learningObjectives.append(lo)",
"def scrape_all_world_cup_goals():\n def scrape_goals_year(year):\n urls = scrape_world_cup_scoreboard(year)\n goals = []\n for url in urls:\n goals.extend(scrape_fifa_goals(url, 'FIFA World Cup'))\n return goals\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_goals_year(year))\n return l",
"def get_goals_todo_info(self, cr, uid, context=None):\n all_goals_info = []\n plan_obj = self.pool.get('gamification.goal.plan')\n\n plan_ids = plan_obj.search(cr, uid, [('user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\n for plan in plan_obj.browse(cr, uid, plan_ids, context=context):\n # serialize goals info to be able to use it in javascript\n serialized_goals_info = {\n 'id': plan.id,\n 'name': plan.name,\n 'visibility_mode': plan.visibility_mode,\n }\n user = self.browse(cr, uid, uid, context=context)\n serialized_goals_info['currency'] = user.company_id.currency_id.id\n\n if plan.visibility_mode == 'board':\n # board report should be grouped by planline for all users\n goals_info = plan_obj.get_board_goal_info(cr, uid, plan, subset_goal_ids=False, context=context)\n\n if len(goals_info) == 0:\n # plan with no valid planlines\n continue\n\n serialized_goals_info['planlines'] = []\n for planline_board in goals_info:\n vals = {'type_name': planline_board['goal_type'].name,\n 'type_description': planline_board['goal_type'].description,\n 'type_condition': planline_board['goal_type'].condition,\n 'type_computation_mode': planline_board['goal_type'].computation_mode,\n 'type_monetary': planline_board['goal_type'].monetary,\n 'type_suffix': planline_board['goal_type'].suffix,\n 'type_action': True if planline_board['goal_type'].action_id else False,\n 'type_display': planline_board['goal_type'].display_mode,\n 'target_goal': planline_board['target_goal'],\n 'goals': []}\n for goal in planline_board['board_goals']:\n # Keep only the Top 3 and the current user\n if goal[0] > 2 and goal[1].user_id.id != uid:\n continue\n\n vals['goals'].append({\n 'rank': goal[0] + 1,\n 'id': goal[1].id,\n 'user_id': goal[1].user_id.id,\n 'user_name': goal[1].user_id.name,\n 'state': goal[1].state,\n 'completeness': goal[1].completeness,\n 'current': goal[1].current,\n 'target_goal': goal[1].target_goal,\n })\n if uid == goal[1].user_id.id:\n vals['own_goal_id'] = goal[1].id\n serialized_goals_info['planlines'].append(vals)\n\n else:\n # individual report are simply a list of goal\n goals_info = plan_obj.get_indivual_goal_info(cr, uid, uid, plan, subset_goal_ids=False, context=context)\n\n if not goals_info:\n continue\n\n serialized_goals_info['goals'] = []\n for goal in goals_info:\n serialized_goals_info['goals'].append({\n 'id': goal.id,\n 'type_name': goal.type_id.name,\n 'type_description': goal.type_description,\n 'type_condition': goal.type_id.condition,\n 'type_monetary': goal.type_id.monetary,\n 'type_suffix': goal.type_id.suffix,\n 'type_action': True if goal.type_id.action_id else False,\n 'type_display': goal.type_id.display_mode,\n 'state': goal.state,\n 'completeness': goal.completeness,\n 'computation_mode': goal.computation_mode,\n 'current': goal.current,\n 'target_goal': goal.target_goal,\n })\n\n all_goals_info.append(serialized_goals_info)\n return all_goals_info",
"def slides():\n return Slide.objects.filter(live=True).order_by('order')",
"def goals(self):\r\n return Goals(self)",
"def get_all_finished_missions(self):\n self.lock.acquire()\n result = self.__Session.query(Mission).filter_by(status=3).all()\n self.lock.release()\n return result",
"def get_talks(self):\r\n return QtSql.QSqlQuery('''SELECT * FROM presentations''')",
"def goals(self):\n return self.problem.goals",
"def get(self, request, format = None):\n goalProgress = GoalProgress.objects.all()\n serializer = GoalProgressSerializer(goalProgress, many=True)\n return Response(serializer.data)",
"def get_four_future_courses():\n\n current_date = datetime.datetime.now().date()\n courses = Courses.query. \\\n filter(Courses.startDate >= current_date). \\\n order_by(Courses.startDate.asc()). \\\n limit(4).\\\n all()\n\n return courses",
"def player_standings():\n DB = connect()\n c = DB.cursor()\n c.execute(\"SELECT * FROM current_standings\")\n DB.commit()\n standings = c.fetchall()\n DB.close()\n return standings",
"def scrape_tournament_goals(competition_name, tournament_id, edition_ids):\n\n l = []\n for edition_id in edition_ids:\n urls = scrape_fifa_scoreboard(tournament_id, edition_id)\n goals = []\n for url in urls:\n l.extend(scrape_fifa_goals(url, competition_name))\n return l",
"def GetCourses(firebase: firebase) -> None:\n\n global courses\n obj_key_list = []\n\n result = firebase.get('/course', None)\n\n if result is None:\n return\n\n for i in result.keys():\n obj_key_list.append(i)\n\n for i in obj_key_list:\n course = Course()\n course.setId(i)\n course.setKnowledgeAreaId(result[i]['knowledgeareaid'])\n course.setCatalogId(result[i]['catalogid'])\n course.setTitle(result[i]['name'])\n course.setDescription(result[i]['description'])\n course.setInstructor(result[i]['instructor'])\n course.setFee(result[i]['fee'])\n courses.append(course)",
"def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)",
"def get_departments() -> list:\n return Department.query.all()",
"def GET(self, *args, **kwargs):\n return list(db.Mood.select()) #{'args': args, 'kwargs': kwargs}\n #results = list(db.Mood.select())\n #return results",
"def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))",
"async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json"
]
| [
"0.6867574",
"0.59022593",
"0.57518893",
"0.5690444",
"0.5685245",
"0.56651735",
"0.5651523",
"0.5633112",
"0.56032073",
"0.55862516",
"0.55823267",
"0.5527885",
"0.5467755",
"0.5417775",
"0.5395148",
"0.531531",
"0.53115666",
"0.5295306",
"0.52783984",
"0.52678376",
"0.52555335",
"0.52430505",
"0.522352",
"0.52145016",
"0.5206085",
"0.51901305",
"0.51724833",
"0.5165818",
"0.51195824",
"0.51132274"
]
| 0.79522294 | 0 |
Function to retrieve the section grades from the db and return them as a SectionGrades or SectionGoalGrades object (this works for SectionGrades table and SectionGoalGrades table) | def get_section_grades(self, section, section_goal=False):
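    # section_goal=False reads SectionGrades (includes incomplete and withdraw counts);
    # section_goal=True reads SectionGoalGrades (letter-grade counts only, keyed by goal_id).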
if not section_goal:
SECTION_GRADES = """SELECT count_ap, count_a, count_am, count_bp, count_b, count_bm, count_cp, count_c, count_cm, count_dp, count_d, count_dm, count_f,count_i, count_w FROM SectionGrades WHERE course = %s AND semester = %s AND year = %s AND section_id = %s"""
else:
SECTION_GRADES = """SELECT count_ap, count_a, count_am, count_bp, count_b, count_bm, count_cp, count_c, count_cm, count_dp, count_d, count_dm, count_f FROM SectionGoalGrades WHERE course = %s AND semester = %s AND year = %s AND section_id = %s AND goal_id = %s"""
ret = None
#try:
if not section_goal:
self.db_cursor.execute(
SECTION_GRADES,
(section.course_name, section.semester, section.year, section.section_id))
else:
self.db_cursor.execute(
SECTION_GRADES,
(section.course, section.semester, section.year, section.section_id, section.goal_id))
section_grades = self.db_cursor.fetchall()
if len(section_grades) > 0 and not section_goal:
ret = SectionGrades()
ret.section_id = section.section_id
ret.semester = section.semester
ret.year = section.year
ret.course = section.course_name
ret.count_ap = section_grades[0][0]
ret.count_a = section_grades[0][1]
ret.count_am = section_grades[0][2]
ret.count_bp = section_grades[0][3]
ret.count_b = section_grades[0][4]
ret.count_bm = section_grades[0][5]
ret.count_cp = section_grades[0][6]
ret.count_c = section_grades[0][7]
ret.count_cm = section_grades[0][8]
ret.count_dp = section_grades[0][9]
ret.count_d = section_grades[0][10]
ret.count_dm = section_grades[0][11]
ret.count_f = section_grades[0][12]
ret.count_i = section_grades[0][13]
ret.count_w = section_grades[0][14]
elif len(section_grades) > 0 and section_goal:
ret = SectionGoalGrades()
ret.section_id = section.section_id
ret.semester = section.semester
ret.year = section.year
ret.course = section.course
ret.goal_id = section.goal_id
ret.count_ap = section_grades[0][0]
ret.count_a = section_grades[0][1]
ret.count_am = section_grades[0][2]
ret.count_bp = section_grades[0][3]
ret.count_b = section_grades[0][4]
ret.count_bm = section_grades[0][5]
ret.count_cp = section_grades[0][6]
ret.count_c = section_grades[0][7]
ret.count_cm = section_grades[0][8]
ret.count_dp = section_grades[0][9]
ret.count_d = section_grades[0][10]
ret.count_dm = section_grades[0][11]
ret.count_f = section_grades[0][12]
else:
ret = None
#except:
# logging.warning("DBAdapter: Error- cannot retrieve section grades: " + str(id))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_student_grade(class_id):\n grades = []\n quiz_grade = query_db(\n \"SELECT quizzes.name, grade FROM quiz_grades JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id \"\n \"WHERE student_id=? AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in quiz_grade:\n student_grade_quiz = {}\n student_grade_quiz[\"thing_name\"] = grade[0]\n student_grade_quiz[\"grade\"] = grade[1]\n grades.append(student_grade_quiz)\n assignment_grade = query_db(\n \"SELECT assignments.name, grade FROM assignment_grades \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics on assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE student_id=? \"\n \"AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in assignment_grade:\n student_grade_assignment = {}\n student_grade_assignment[\"thing_name\"] = grade[0]\n student_grade_assignment[\"grade\"] = grade[1]\n grades.append(student_grade_assignment)\n return grades",
"def assignment_get_grades(assignment, section_id=None, problem=None):\n\tif problem:\n\t\treturn assignment_get_problem_grades(problem, section_id)\n\n\tif section_id:\n\t\tsection_users = db((db.sections.id==db.section_users.section) & (db.auth_user.id==db.section_users.auth_user))\n\t\tusers = section_users(db.auth_user.course_id == assignment.course)\n\t\tusers = users(db.sections.id == section_id)\n\telse:\n\t\tusers = db(db.auth_user.course_id == assignment.course)\n\tusers = users.select(\n\t\tdb.auth_user.ALL,\n\t\torderby = db.auth_user.last_name,\n\t\t)\n\tgrades = db(db.grades.assignment == assignment.id)\n\tgrades = grades.select(db.grades.ALL)\n\tfor u in users:\n\t\tu.grade = None\n\t\tu.comment = \"\"\n\t\tfor g in grades:\n\t\t\tif g.auth_user.id == u.id:\n\t\t\t\tu.grade = g.score\n\treturn users",
"def get_class_grades(class_id):\n\n grades = []\n quiz_grades = query_db(\n \"SELECT people.name, quizzes.name, grade FROM quiz_grades JOIN people \"\n \"ON quiz_grades.student_id=people.id JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n [class_id],\n )\n for grade in quiz_grades:\n grade_class = {}\n grade_class[\"student_name\"] = grade[0]\n grade_class[\"thing_name\"] = str(grade[1]) + \" (Quiz)\"\n grade_class[\"grade\"] = grade[2]\n grades.append(grade_class)\n assignment_grades = query_db(\n \"SELECT people.name, assignments.name, grade FROM assignment_grades \"\n \"JOIN people ON assignment_grades.student_id=people.id \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics ON assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n [class_id],\n )\n for grade in assignment_grades:\n grade_assign = {}\n grade_assign[\"student_name\"] = grade[0]\n grade_assign[\"thing_name\"] = str(grade[1]) + \" (Assignment)\"\n grade_assign[\"grade\"] = grade[2]\n grades.append(grade_assign)\n return grades",
"def get_grades(session): \n res = _get_grades_step_0(session)\n dossier_path = re.search('href=\"([\\/a-zA-Z0-9\\.]*)\" title=\"Mon dossier\"', res).group(1)\n\n print(\"[STEP 0] got dossier path: \" + dossier_path)\n\n res = _get_grades_step_1(session, dossier_path)\n\n # Get the list of years available (1A, 2A, 3A) and their identifiers\n res = _get_grades_step_2(session)\n rgx = re.finditer(r'''<u>([A-Z\\/0-9]*)<\\/u><\\/a><\\/td><td width=\"30%\"><a href=\"#\" onclick=\"return oamSubmitForm\\('([a-zA-Z0-9_]*)','([a-zA-Z0-9_:]*)',null,\\[\\['row','([0-9]*)'\\]\\]\\);\" id=\"([a-zA-Z0-9_:]*)\">([a-zA-Z0-9 ]*)<\\/a>''', res)\n\n years = []\n for match in rgx:\n years.append({\n \"id\": match.group(1),\n \"name\": match.group(6),\n \"param\": match.group(2),\n \"paramval\": match.group(5),\n \"row\": match.group(4)\n })\n\n print(\"[STEP 2] got years:\", years)\n\n year_grades = []\n for year in years:\n res = _get_grades_step_3(session, year)\n\n soup = BeautifulSoup(res, 'html.parser')\n table = soup.find('table', attrs={'class':'portlet-table'})\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n\n rawgrades = []\n for row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n\n data = []\n for ele in cols:\n if ele:\n data.append(ele)\n\n rawgrades.append(data)\n\n grades = []\n gradergx = re.compile('^[0-9]{1,2}$')\n for line in rawgrades:\n if len(line) == 3 and gradergx.match(line[2]):\n grades.append({\n \"module_code\": line[0],\n \"module_name\": line[1],\n \"module_grade\": int(line[2])\n })\n\n print(\"[STEP 3] got {nb} modules with grades for year {year}\".format(\n nb=len(grades), year=year['name']))\n\n year_grades.append({\n 'year':{'id': year['id'], 'label': year['name']},\n 'grades': grades,\n 'raw': rawgrades\n })\n\n return year_grades",
"def get_all_grades(first_name, last_name):\n QUERY = \"\"\" SELECT s.first_name, s.last_name, g.project_title, g.grade \n FROM Students AS s \n INNER JOIN Grades AS g ON s.github = g.student_github\n WHERE s.first_name = ? AND s.last_name = ? \"\"\"\n db_cursor.execute(QUERY, (first_name, last_name))\n grades_data = db_cursor.fetchone()\n print grades_data[-1]",
"def getGrades(self,student):\n try:\n return self.grades[student.getIdNum()][:]\n except KeyError:\n raise ValueError('Student not in grade book')",
"def get_sections(self, start_year, start_semester, end_year, end_semester, course, section_goal=False):\n SECTION_GRADES = \"\"\"SELECT section_id, num_students, comment1, comment2 FROM Section WHERE semester = %s AND year = %s AND course_name = %s\"\"\"\n\n spring = 'R'\n fall = 'F'\n summer = 'S'\n winter = 'W'\n\n sem1 = {1 : 'F', 2 : 'W', 3 : 'R', 4 : 'S'}\n sem2 = {'F' : 1, 'W' : 2, 'R' : 3,'S' : 4}\n start = sem2[start_semester]\n end = sem2[end_semester]\n\n semester_list = []\n year_list = []\n if start_year != end_year:\n num_years = end_year-start_year\n for i in range(0, num_years+1):\n year_list.append(start_year+i)\n ctr = start\n\n # todo: very jank way of doing this but idk what else to do rn\n # basically, for each year, we put on 4 semesters in order of when the start semester occurs\n # we should be putting on one of each\n for i in range(1,5):\n semester_list.append(sem1[ctr%5])\n ctr+=1\n if ctr%5 == 0:\n ctr+=1\n # when we're done with each year, we pop off the end of semester_list until we have the right end semester\n if semester_list:\n while semester_list[len(semester_list)-1] != sem1[end]:\n semester_list.pop(len(semester_list)-1)\n\n else:\n year_list.append(start_year)\n for i in range(start, end+1):\n year_list.append(i)\n\n\n ret = None\n section_list1 = []\n section_list2 = []\n current_section = Section()\n #try:\n for i in year_list:\n for j in semester_list:\n self.db_cursor.execute(SECTION_GRADES, (j, i, course.name))\n section_list1 = self.db_cursor.fetchall()\n for k in section_list1:\n current_section.course_name = course.name\n current_section.section_id = k[0]\n current_section.num_students = k[1]\n current_section.comment1 = k[2]\n current_section.comment2 = k[3]\n current_section.year = i\n current_section.semester = j\n section_list2.append(current_section)\n if section_list2:\n ret = section_list2\n else:\n ret = None\n\n #except:\n #logging.warning(\"DBAdapter: Error- cannot retrieve sections: \" + str(id))\n\n return ret",
"def get_grade(self, student_id):\r\n row = self.grades.get(student_id, [])\r\n ncomp = len(self.components)\r\n return [row.get(comp, None) for comp in range(ncomp)]",
"def getGrades(self, student):\n try: #return copy of list of student's grades\"\"\"\n return self.grades[student.getIdNum()][:]\n except:\n raise ValueError('Student not in mapping')",
"def get_grades(self, student):\n try:\n return self.grades[student.id][:] # notice that a copy is returned\n except KeyError:\n raise ValueError('Student not in Grade Book.')",
"async def get_grades(\n self, last_sync: datetime = None, deleted=False, **kwargs\n ) -> Union[AsyncIterator[Grade], List[int]]:\n return Grade.get(self._api, last_sync, deleted, **kwargs)",
"def chapter_grades(self):\n return self.course_grade.chapter_grades",
"def getGrades(self, student):\n try:\n return self.grades[student.getIDNumber()][:]\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")",
"def get_sections_of_a_course(self, course_name, year, semester_name):\n if semester_name not in SEMESTER_NAME_MAP.keys():\n logging.warning(\"DBAdapter: Error- invalid semester name.\")\n return None\n semester = SEMESTER_NAME_MAP[semester_name]\n return_list = []\n\n GET_SECTIONS = \"\"\"SELECT section_id, num_students, comment1, comment2 FROM Section WHERE course_name = %s AND semester = %s AND year = %s\"\"\"\n\n #try:\n self.db_cursor.execute(GET_SECTIONS, (course_name, semester, year))\n tups = self.db_cursor.fetchall()\n for t in tups:\n new_section = Section()\n new_section.semester = semester\n new_section.year = year\n new_section.course_name = course_name\n new_section.section_id = t[0]\n new_section.num_students = t[1]\n new_section.comment1 = t[2]\n new_section.comment2 = t[3]\n return_list.append(new_section)\n\n return return_list\n\n \"\"\"\n except:\n logging.warning(\"DBAdapter: Error- failed to retrieve all sections of a course.\")\n return []\n \"\"\"",
"def assignment_grade(id, session_id, course_id):\n\n user_id = session.get('user_id')\n\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT DISTINCT(ROUND(grades.points_received / grades.total_points, 2) * 100) as assignment_grade,\n grades.total_points as total, grades.points_received as earned,\n grades.submission as submission, grades.feedback as feedback,\n grades.student_id, grades.assignment_id as assign_id, assignments.name as assign_name,\n assignments.description as description,\n grades.grade_id, roster.session_id as class_session, courses.name as name\n\t FROM courses JOIN sessions on courses.course_id=sessions.id\n\t JOIN assignments on assignments.session_id=sessions.id\n JOIN grades on grades.assignment_id=assignments.assignment_id\n JOIN roster on roster.session_id=sessions.id\n WHERE grades.assignment_id= %s\n AND grades.student_id= %s\"\"\",\n (id, user_id))\n\n grade = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/assignment_grade.html\", course_id=course_id, session_id=session_id, id=id, grade=grade)",
"def get_grade(soup):\n\n # up there with with route name\n grade_table = soup.h3\n\n # look for grades in spans\n grade = []\n for s in grade_table.find_all('span'):\n\n # class names are the grading systems\n if s['class'] != None:\n head = s['class'][0]\n head = head.encode('utf8', errors = 'strict')\n\n # grade are showing with text\n body = s.get_text()\n body = body.encode('utf8', errors = 'ignore')\n\n grade.append(body)\n\n # extract tbe grades\n grade_data = {}\n for g in grade:\n h = g.split(SPLIT_CHAR)\n if len(h) > 1:\n grade_data['rate'+h[0].strip()] = h[1]\n\n return grade_data",
"def get_grade_entries(user, assignments_map, students_map):\n grade_entries = GradeEntry.query(ancestor=get_parent_key(user)).fetch()\n for grade_entry in grade_entries:\n grade_entry.assignment = assignments_map[grade_entry.assignment_key]\n grade_entry.student = students_map[grade_entry.student_key]\n return grade_entries",
"def read_grades(gradefile):\n #skip over header\n line = gradefile.readline()\n while line != '\\n':\n line = gradefile.readline()\n #read the grades, accumlating them into a list.\n grades = []\n \n line = gradefile.readline()\n while line != '':\n #We have a string containing info for single student.\n #Find last space and take everything after space.\n grade = line[line.rfind(' ') + 1:]\n grades.append(float(grade))\n line = gradefile.readline()\n \n return grades",
"def assignment_list_with_grades(user_id): # IN USE\n logged_user = Student.make_student(user_id)\n Assignment.list_from_sql()\n assignments_list = Assignment.pass_assign_for_student()\n assignments_list_to_print = []\n for assignment in assignments_list:\n type_of_assignment = 'Individual'\n if assignment.GROUP == '1':\n type_of_assignment = 'Group'\n new_line = [assignment.TITLE, assignment.ID_MENTOR, assignment.START_DATA, assignment.END_DATA,\n type_of_assignment]\n submission = Submission.find_submission(logged_user, assignment)\n if submission:\n new_line.append('submitted')\n if submission.GRADE:\n new_line.append(submission.GRADE)\n else:\n new_line.append('None')\n else:\n new_line.append('not submitted')\n new_line.append('None')\n new_line.append(assignment.ID)\n assignments_list_to_print.append(new_line)\n\n return assignments_list_to_print",
"def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False):\r\n course_key = course.id\r\n enrolled_students = User.objects.filter(\r\n courseenrollment__course_id=course_key,\r\n courseenrollment__is_active=1,\r\n ).prefetch_related(\"groups\").order_by('username')\r\n\r\n header = [_('ID'), _('Username'), _('Full Name'), _('edX email'), _('External email')]\r\n\r\n datatable = {'header': header, 'students': enrolled_students}\r\n data = []\r\n\r\n gtab = GradeTable()\r\n\r\n for student in enrolled_students:\r\n datarow = [student.id, student.username, student.profile.name, student.email]\r\n try:\r\n datarow.append(student.externalauthmap.external_email)\r\n except: # ExternalAuthMap.DoesNotExist\r\n datarow.append('')\r\n\r\n if get_grades:\r\n gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline)\r\n log.debug('student={0}, gradeset={1}'.format(student, gradeset))\r\n with gtab.add_row(student.id) as add_grade:\r\n if get_raw_scores:\r\n # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']\r\n for score in gradeset['raw_scores']:\r\n add_grade(score.section, getattr(score, 'earned', score[0]))\r\n else:\r\n for grade_item in gradeset['section_breakdown']:\r\n add_grade(grade_item['label'], grade_item['percent'])\r\n student.grades = gtab.get_grade(student.id)\r\n\r\n data.append(datarow)\r\n\r\n # if getting grades, need to do a second pass, and add grades to each datarow;\r\n # on the first pass we don't know all the graded components\r\n if get_grades:\r\n for datarow in data:\r\n # get grades for student\r\n sgrades = gtab.get_grade(datarow[0])\r\n datarow += sgrades\r\n\r\n # get graded components and add to table header\r\n assignments = gtab.get_graded_components()\r\n header += assignments\r\n datatable['assignments'] = assignments\r\n\r\n datatable['data'] = data\r\n return datatable",
"def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary",
"def get_sections():\n return Section.objects.all()",
"def get_grade(course_det):\n return course_det[1]",
"def get_grade_levels_and_names(self, service):\n range_name = ['Form Responses 2!B2:C']\n spreadsheet_id = '1GD5UBfEcWwxopL3pS7t4MIjFWFzk_NsPXT24T1JxVa8'\n result = service.spreadsheets().values().batchGet(\n spreadsheetId=spreadsheet_id, ranges=range_name, majorDimension='COLUMNS').execute()\n ranges = result.get('valueRanges', [])\n print('{0} Grades and Names retrieved.'.format(len(ranges)))\n return ranges",
"def grades(self) -> List[int]:\n\n return grades_present(self, _eps)",
"def __ui_list_grades_by_student(self):\n student_id = input(\"Give student ID: \")\n try:\n list_of_grades = self.__grade_controller.get_grades_by_student(student_id)\n if len(list_of_grades) == 0:\n print(\"Student doesn't have any grade.\")\n return\n\n for g in list_of_grades:\n print(str(g))\n\n except GradeException as ge:\n print(ge)\n return",
"def grade(self, grade_number: int):\n return self.students.setdefault(grade_number, [])",
"def addGrade(gradeName, gradeColumn):\n\n grades = []\n sheet = wb[wb.sheetnames[0]]\n for i in range(0,13):\n sourceValue = sheet.cell(row=10+i, column=gradeColumn).value\n grades.append(sourceValue)\n allGrades[wb.sheetnames[0]] [gradeName] = grades\n return allGrades",
"def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)",
"def get_students_problem_grades(request, csv=False):\r\n module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))\r\n csv = request.GET.get('csv')\r\n\r\n # Query for \"problem grades\" students\r\n students = models.StudentModule.objects.select_related('student').filter(\r\n module_state_key=module_state_key,\r\n module_type__exact='problem',\r\n grade__isnull=False,\r\n ).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')\r\n\r\n results = []\r\n if not csv:\r\n # Restrict screen list length\r\n # Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH\r\n # without doing another select.\r\n for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:\r\n student_dict = {\r\n 'name': student['student__profile__name'],\r\n 'username': student['student__username'],\r\n 'grade': student['grade'],\r\n }\r\n\r\n student_dict['percent'] = 0\r\n if student['max_grade'] > 0:\r\n student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])\r\n results.append(student_dict)\r\n\r\n max_exceeded = False\r\n if len(results) > MAX_SCREEN_LIST_LENGTH:\r\n # Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH\r\n del results[-1]\r\n max_exceeded = True\r\n\r\n response_payload = {\r\n 'results': results,\r\n 'max_exceeded': max_exceeded,\r\n }\r\n return JsonResponse(response_payload)\r\n else:\r\n tooltip = request.GET.get('tooltip')\r\n filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])\r\n\r\n header = [_(\"Name\").encode('utf-8'), _(\"Username\").encode('utf-8'), _(\"Grade\").encode('utf-8'), _(\"Percent\").encode('utf-8')]\r\n for student in students:\r\n\r\n percent = 0\r\n if student['max_grade'] > 0:\r\n percent = round(student['grade'] * 100 / student['max_grade'])\r\n results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])\r\n\r\n response = create_csv_response(filename, header, results)\r\n return response"
]
| [
"0.7101856",
"0.67804635",
"0.6752108",
"0.6615757",
"0.6474653",
"0.6421206",
"0.63589877",
"0.631259",
"0.6258068",
"0.62427837",
"0.62184596",
"0.61503845",
"0.6106861",
"0.6070258",
"0.6044991",
"0.60328126",
"0.60154426",
"0.6001862",
"0.5941723",
"0.59187573",
"0.5908769",
"0.58956814",
"0.58780503",
"0.5877121",
"0.5861699",
"0.58209664",
"0.5801926",
"0.5707944",
"0.5706604",
"0.5691912"
]
| 0.7776043 | 0 |
Function to retrieve a list of sections based off of a course | def get_sections_of_a_course(self, course_name, year, semester_name):
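        # SEMESTER_NAME_MAP translates the human-readable semester name into the code stored in the Section table.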
if semester_name not in SEMESTER_NAME_MAP.keys():
logging.warning("DBAdapter: Error- invalid semester name.")
return None
semester = SEMESTER_NAME_MAP[semester_name]
return_list = []
GET_SECTIONS = """SELECT section_id, num_students, comment1, comment2 FROM Section WHERE course_name = %s AND semester = %s AND year = %s"""
        try:
            self.db_cursor.execute(GET_SECTIONS, (course_name, semester, year))
            tups = self.db_cursor.fetchall()
            for t in tups:
                new_section = Section()
                new_section.semester = semester
                new_section.year = year
                new_section.course_name = course_name
                new_section.section_id = t[0]
                new_section.num_students = t[1]
                new_section.comment1 = t[2]
                new_section.comment2 = t[3]
                return_list.append(new_section)
            return return_list
        except:
            logging.warning("DBAdapter: Error- failed to retrieve all sections of a course.")
            return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def select_all_sections(self, course_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT * FROM course_sections \n WHERE course_id = ?\"\"\",\n (course_id,),\n )\n return cursor.fetchall()",
"def get_sections(self, course):\n return course.available_sections if self.free_sections_only else course.sections",
"def get_sections(self, start_year, start_semester, end_year, end_semester, course, section_goal=False):\n SECTION_GRADES = \"\"\"SELECT section_id, num_students, comment1, comment2 FROM Section WHERE semester = %s AND year = %s AND course_name = %s\"\"\"\n\n spring = 'R'\n fall = 'F'\n summer = 'S'\n winter = 'W'\n\n sem1 = {1 : 'F', 2 : 'W', 3 : 'R', 4 : 'S'}\n sem2 = {'F' : 1, 'W' : 2, 'R' : 3,'S' : 4}\n start = sem2[start_semester]\n end = sem2[end_semester]\n\n semester_list = []\n year_list = []\n if start_year != end_year:\n num_years = end_year-start_year\n for i in range(0, num_years+1):\n year_list.append(start_year+i)\n ctr = start\n\n # todo: very jank way of doing this but idk what else to do rn\n # basically, for each year, we put on 4 semesters in order of when the start semester occurs\n # we should be putting on one of each\n for i in range(1,5):\n semester_list.append(sem1[ctr%5])\n ctr+=1\n if ctr%5 == 0:\n ctr+=1\n # when we're done with each year, we pop off the end of semester_list until we have the right end semester\n if semester_list:\n while semester_list[len(semester_list)-1] != sem1[end]:\n semester_list.pop(len(semester_list)-1)\n\n else:\n year_list.append(start_year)\n for i in range(start, end+1):\n year_list.append(i)\n\n\n ret = None\n section_list1 = []\n section_list2 = []\n current_section = Section()\n #try:\n for i in year_list:\n for j in semester_list:\n self.db_cursor.execute(SECTION_GRADES, (j, i, course.name))\n section_list1 = self.db_cursor.fetchall()\n for k in section_list1:\n current_section.course_name = course.name\n current_section.section_id = k[0]\n current_section.num_students = k[1]\n current_section.comment1 = k[2]\n current_section.comment2 = k[3]\n current_section.year = i\n current_section.semester = j\n section_list2.append(current_section)\n if section_list2:\n ret = section_list2\n else:\n ret = None\n\n #except:\n #logging.warning(\"DBAdapter: Error- cannot retrieve sections: \" + str(id))\n\n return ret",
"def get_available_sections(url, headers):\n def _make_url(section_soup): # FIXME: Extract from here and test\n return BASE_URL + section_soup.ul.find('a')['href']\n\n def _get_section_name(section_soup): # FIXME: Extract from here and test\n return section_soup.h3.a.string.strip()\n\n def _make_subsections(section_soup):\n subsections_soup = section_soup.ul.find_all(\"li\")\n # FIXME correct extraction of subsection.name (unicode)\n subsections = [SubSection(position=i,\n url=BASE_URL + s.a['href'],\n name=s.p.string)\n for i, s in enumerate(subsections_soup, 1)]\n return subsections\n\n courseware = get_page_contents(url, headers)\n soup = BeautifulSoup(courseware)\n sections_soup = soup.find_all('div', attrs={'class': 'chapter'})\n\n sections = [Section(position=i,\n name=_get_section_name(section_soup),\n url=_make_url(section_soup),\n subsections=_make_subsections(section_soup))\n for i, section_soup in enumerate(sections_soup, 1)]\n return sections",
"def canvas_api_sections(state, course_id):\n\n api = state.canvas_api()\n for section in api.list_sections(course_id):\n click.echo(str(section))",
"def filter_sections(courses, selected_sections):\n for c in courses:\n c_key = f\"{c.name} {c.num}\"\n\n lab_section = selected_sections[c_key][\"lab\"]\n lecture_section = selected_sections[c_key][\"lecture\"]\n tutorial_section = selected_sections[c_key][\"tutorial\"]\n\n c.labs = [s for s in c.labs if s.section == lab_section]\n c.lectures = [s for s in c.lectures if s.section == lecture_section]\n c.tutorials = [s for s in c.tutorials if s.section == tutorial_section]",
"def get_courses(std):\n return std[2]",
"def parse_get_section(xml_course):\n parse_section = parse_create_section(xml_course)\n query_constraints = {\n \"crn\": parse_section[\"crn\"]\n }\n params = urllib.urlencode({\"where\": json.dumps(query_constraints)})\n connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n connection.connect()\n connection.request(\n \"GET\",\n \"%s?%s\" % (SECTIONS_ENDPOINT, params),\n '',\n {\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n response = json.loads(connection.getresponse().read())\n if response.get(\"results\"):\n return response[\"results\"][0]\n else:\n return None",
"def get_sections():\n return Section.objects.all()",
"def get_course(data):\n\n return {item['course'] for item in data}",
"def get_sections(self,):\n self.logger.info(\"\\t[+] get_sections [+]\")\n try:\n return self.sections.select().execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")",
"def select_course_detail(self, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_id = ? AND cs.course_section_id = ?\"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()",
"def display_sections(course_name, sections):\n num_sections = len(sections)\n _print('%s has %d sections so far' % (course_name, num_sections))\n for i, section in enumerate(sections, 1):\n _print('%d - Download %s videos' % (i, section.name))\n _print('%d - Download them all' % (num_sections + 1))",
"def canvas_api_section_students(state, course_id, section_id):\n\n api = state.canvas_api()\n for student in api.list_section_students(course_id, section_id):\n click.echo(str(student))",
"def parseSection(self, response):\n sel = Selector(response)\n sections = sel.xpath('//table[@class=\"sections responsive\"]//tr[not(@class=\"headers\")]')\n for s in sections:\n item = CourseItem(response.request.meta[\"item\"])\n item['section'] = s.xpath('@data-section-id').get().strip()\n item['instructors'] = s.css('.instructor::text').get()\n if item['instructors'] != None:\n item['instructors'].strip()\n item['instructors'] = [x.strip() for x in re.split(',', item['instructors'])]\n item['syllabus'] = s.css('.syllabus a::attr(href)').get()\n if item['syllabus'] != None:\n item['syllabus'].strip()\n return item\n \n\n \"\"\"\n Ignore the code below this. I was trying to get\n the times, days, and number registered from the class sections\n \"\"\"\n #times = s.xpath('//td[@class=\"time\"]/text()').get().strip()\n #times = re.split('-', times)\n #starttime = times[0]\n #endtime = times[1]\n #endt = dt.datetime.strptime(endtime, '%H:%M%p')\n # TODO: Check if \"am\"/\"pm\" from endt, & if endt hour is greater/less than startt \n #startt = dt.datetime.strptime(starttime, '%H:%M')\n #days = s.xpath('//td[@class=\"days\"]/text()').get().strip()\n #days = re.split(',', days)\n #numdays = len(days]\n \n #cap = s.xpath('//td[@class=\"registered\"]//a/text()').get().strip()\n #cap = re.split(' of ', cap.strip())\n #item['capacity'] = cap[1]",
"def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)",
"def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses",
"def get_courses(self):\r\n\r\n return self.def_ms.get_courses()",
"def get_categs_section(sect):\n return Category.objects.filter(section__section=sect)",
"def build_course_sections(self, course_section_data):\n return [self.build_course_section(**row) for row in course_section_data]",
"def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []",
"def available_courses(self):\r\n def _get_course_name(el):\r\n # The first component in the link text is the course number\r\n _, course_name = el.text.split(' ', 1)\r\n return course_name\r\n\r\n return self.q(css='section.info > hgroup > h3 > a').map(_get_course_name).results",
"def get_course_about_section(course, section_key):\r\n\r\n # Many of these are stored as html files instead of some semantic\r\n # markup. This can change without effecting this interface when we find a\r\n # good format for defining so many snippets of text/html.\r\n\r\n # TODO: Remove number, instructors from this list\r\n if section_key in ['short_description', 'description', 'key_dates', 'video',\r\n 'course_staff_short', 'course_staff_extended',\r\n 'requirements', 'syllabus', 'textbook', 'faq', 'more_info',\r\n 'number', 'instructors', 'overview',\r\n 'effort', 'end_date', 'prerequisites', 'ocw_links']:\r\n\r\n try:\r\n\r\n request = get_request_for_thread()\r\n\r\n loc = course.location.replace(category='about', name=section_key)\r\n\r\n # Use an empty cache\r\n field_data_cache = FieldDataCache([], course.id, request.user)\r\n about_module = get_module(\r\n request.user,\r\n request,\r\n loc,\r\n field_data_cache,\r\n course.id,\r\n not_found_ok=True,\r\n wrap_xmodule_display=False,\r\n static_asset_path=course.static_asset_path\r\n )\r\n\r\n html = ''\r\n\r\n if about_module is not None:\r\n try:\r\n html = about_module.render('student_view').content\r\n except Exception: # pylint: disable=broad-except\r\n html = render_to_string('courseware/error-message.html', None)\r\n log.exception(\r\n u\"Error rendering course={course}, section_key={section_key}\".format(\r\n course=course, section_key=section_key\r\n ))\r\n return html\r\n\r\n except ItemNotFoundError:\r\n log.warning(\r\n u\"Missing about section {key} in course {url}\".format(key=section_key, url=course.location.to_deprecated_string())\r\n )\r\n return None\r\n elif section_key == \"title\":\r\n return course.display_name_with_default\r\n elif section_key == \"university\":\r\n return course.display_org_with_default\r\n elif section_key == \"number\":\r\n return course.display_number_with_default\r\n\r\n raise KeyError(\"Invalid about key \" + str(section_key))",
"def parse_create_section(xml_course):\n\n attrs = [\n \"section\",\n 'crn',\n \"start-time\",\n \"end-time\",\n \"meeting-days\",\n \"location\",\n \"section-number\",\n \"instructor\"\n ]\n\n section = pull_attributes_from_xml(xml_course, attrs)\n\n section[\"places\"] = []\n\n # Create Place attribute pointer based on location string\n # Get places from Parse\n places = get_places()[\"results\"]\n # Get location info from section (of form [\"BRK 101\", \"TBA\"])\n all_locations = section[\"location\"].split(\", \")\n # Filter out TBA\n # TODO Maybe do something else with them\n locations = [location for location in all_locations if location != \"TBA\"]\n\n for location in locations:\n building_code = location.split(\" \")[0]\n for place in places:\n if place.get(\"symbol\") and place[\"symbol\"] == building_code:\n section[\"places\"].append(place[\"objectId\"])\n break;\n\n\n return section",
"def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})",
"def _section_course_info(course_key, access):\r\n course = get_course_by_id(course_key, depth=None)\r\n\r\n section_data = {\r\n 'section_key': 'course_info',\r\n 'section_display_name': _('Course Info'),\r\n 'access': access,\r\n 'course_id': course_key,\r\n 'course_display_name': course.display_name,\r\n 'enrollment_count': CourseEnrollment.num_enrolled_in(course_key),\r\n 'has_started': course.has_started(),\r\n 'has_ended': course.has_ended(),\r\n 'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n }\r\n\r\n try:\r\n advance = lambda memo, (letter, score): \"{}: {}, \".format(letter, score) + memo\r\n section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), \"\")[:-2]\r\n except Exception:\r\n section_data['grade_cutoffs'] = \"Not Available\"\r\n # section_data['offline_grades'] = offline_grades_available(course_key)\r\n\r\n try:\r\n section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]\r\n except Exception:\r\n section_data['course_errors'] = [('Error fetching errors', '')]\r\n\r\n return section_data",
"def course_index(request, course_key):\r\n course_module = _get_course_module(course_key, request.user, depth=3)\r\n lms_link = get_lms_link_for_item(course_module.location)\r\n sections = course_module.get_children()\r\n\r\n\r\n return render_to_response('overview.html', {\r\n 'context_course': course_module,\r\n 'lms_link': lms_link,\r\n 'sections': sections,\r\n 'course_graders': json.dumps(\r\n CourseGradingModel.fetch(course_key).graders\r\n ),\r\n 'new_section_category': 'chapter',\r\n 'new_subsection_category': 'sequential',\r\n 'new_unit_category': 'vertical',\r\n 'category': 'vertical'\r\n })",
"def _getSections(self):\r\n\r\n sections = self.cf.sections()\r\n return sections",
"def get_course_by_key_words(input):",
"def see_teaching_courses(self, username: str, token: str) -> List[Dict[str, object]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get UID from user's username\n uid = self.get_uid(username=username)\n\n # Query database for courses instructed by a user with this UID\n cursor = self._db_connection.cursor()\n cursor.execute(\n '''\n SELECT \n course_id,\n course_abbreviation,\n course_name, \n time,\n seats \n FROM \n courses\n WHERE \n instructor_id = ?\n ;\n ''', (uid,))\n\n db_results = cursor.fetchall()\n\n if db_results is None:\n print(\"No associated courses found!\")\n return []\n\n # Build information dicts for every course this user is instructing\n courses = []\n for result in db_results:\n # Get the number of students enrolled in this course already\n cursor.execute('''SELECT COUNT(*) FROM enrollment_records WHERE course_id = ?;''', (result[0],))\n students_enrolled = cursor.fetchone()[0]\n if students_enrolled is None:\n students_enrolled = 0\n\n # Build a course dict from the data\n courses.append({\n \"course_abbreviation\": result[1],\n \"course_name\": result[2],\n \"time\": result[3],\n \"students_enrolled\": students_enrolled,\n \"capacity\": result[4],\n })\n\n return courses"
]
| [
"0.7577065",
"0.7576953",
"0.7424162",
"0.7382828",
"0.7236566",
"0.7176602",
"0.7086361",
"0.7010396",
"0.69594324",
"0.66986996",
"0.6623919",
"0.6595404",
"0.654556",
"0.6525249",
"0.65070343",
"0.650297",
"0.64985454",
"0.6495903",
"0.648368",
"0.6476749",
"0.64632314",
"0.6458993",
"0.6397824",
"0.6353624",
"0.6301966",
"0.62370294",
"0.6214678",
"0.6213978",
"0.6184562",
"0.618438"
]
| 0.7686416 | 0 |
Function to remove curriculum goals from the db | def remove_curriculum_goals(self, curriculum):
DELETE_CURRICULUM_GOALS = """DELETE FROM Goal WHERE curriculum_name = %s"""
try:
self.db_cursor.execute(DELETE_CURRICULUM_GOALS, (curriculum.name,))
self.db_connection.commit()
except:
logging.warning("DBAdapter: Error- Could not delete curriculum goals.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_goals(self, course):\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()",
"def remove_curriculum(self, curriculum):\n DELETE_CURRICULUM = \"\"\"DELETE FROM Curriculum WHERE name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum.\")",
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def remove():\n\n db_remove()",
"def del_done():\n # This function works just like the deleting function\n c.execute(\"DELETE FROM activities WHERE status = 'done' AND Frequency != 'correct'\")\n conn.commit()",
"def remove(self, name):\r\n goals = self.goals()\r\n for goal in goals:\r\n if goal.name == name:\r\n goals.remove(goal)\r\n return self\r\n raise GoalError('Goal %s does not exist in this phase, members are: %s' % (name, goals))",
"def delete_goal(self, task_name):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"DELETE FROM goals WHERE task = ?\"\"\", (task_name,))\r\n\t\treturn self.c.rowcount",
"def remove():",
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def delete_savings_goal():\n\n current_goal = SavingsGoal.query.filter(\n SavingsGoal.user_id == str(current_user.id)).one_or_none()\n\n db.session.delete(current_goal)\n db.session.commit()\n\n flash('Savings goal was successfully deleted', 'warning')\n return redirect(url_for('savings.savings_display'))",
"def test_deleting_goal(self):\n\n delete_goal(1)\n self.assertIsNone(Goal.query.get(1))",
"def remove_skill_from_database(skillpath):\n to_remove = database_controller.get_subcategories(skillpath)\n subcategories_to_check = to_remove.copy()\n to_remove.append(skillpath)\n while subcategories_to_check:\n new_subcategories = database_controller.get_subcategories(subcategories_to_check.pop())\n to_remove.extend(new_subcategories)\n subcategories_to_check.extend(new_subcategories)\n for sub_path in reversed(to_remove):\n sid = database_controller.get_skill(sub_path).id\n Hierarchy.query.filter_by(parent_skill_id=sid).delete()\n Hierarchy.query.filter_by(child_skill_id=sid).delete()\n MilestoneAssociation.query.filter_by(milestone_skill_id=sid).delete()\n Association.query.filter_by(skill_id=sid).delete()\n # duplicate names WILL get removed here\n Skill.query.filter_by(path=sub_path).delete()\n db.session.commit()",
"def test_delete_goal(self):\n pass",
"def remove(self):",
"def deleteMatches():\n\n query = (\"DELETE FROM matches;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'delete'})",
"def test_deleting_patient_goals(self):\n\n data = {\"goal\": 1}\n result = self.client.post(\"/delete-goal\", data=data)\n goal = Goal.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(goal)",
"def clear(cls):\r\n cls._goals_by_phase.clear()\r\n cls._phase_by_goal.clear()",
"def remove_tactic(self):\n tactic_removed = input(\"Enter a tactic to be removed: \")\n self.proof.tactics.remove(tactic_removed)\n for gene in self.population:\n gene.chromosome = [e for e in gene.chromosome if e != tactic_removed]",
"def delete_habit():\n analytics.remove_habit('Play Piano')",
"def remove_skill(username, skillpath):\n to_remove = database_controller.get_subcategories(skillpath, username=username)\n subcategories_to_check = to_remove.copy()\n to_remove.append(skillpath)\n uid = database_controller.get_user(username).id\n while subcategories_to_check:\n new_subcategories = database_controller.get_subcategories(subcategories_to_check.pop())\n to_remove.extend(new_subcategories)\n subcategories_to_check.extend(new_subcategories)\n for sub_path in reversed(to_remove):\n sid = database_controller.get_skill(sub_path).id\n MilestoneAssociation.query.filter_by(milestone_skill_id=sid, milestone_users_id=uid).delete()\n Association.query.filter_by(skill_id=sid, users_id=uid).delete()\n db.session.commit()",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def data_cleaning():\n conn = get_connect()\n conn.execute(\"DELETE FROM championMatchData WHERE kills < 2 AND deaths < 2 AND assists < 2\")\n conn.commit()\n conn.close()\n return",
"def deleteMatches():\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes delete query to delete all records in MATCH table\n c.execute(\"DELETE FROM MATCH;\")\n # commits the changes perform on MATCH table after delete statement executes\n conn.commit()\n # closes the connection to tournament database\n conn.close()",
"def deleteMatches():\n c.execute(\"DELETE FROM matchup\");\n print \"All matches have been successfully deleted\"\n return",
"def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)",
"def deleteMatches():\n cursor.execute(\"\"\"delete from matches\"\"\")",
"def deleteMatches():\n db = connect()\n c = db.cursor()\n query = (\"DELETE FROM results;\")\n c.execute(query)\n db.commit()\n db.close()",
"def remove(name):\n del person_database[name]",
"def deleteMatches():\n DB = dbc()\n DB.cursor().execute('DELETE FROM matches')\n DB.commit()\n DB.close()",
"def remove():\n pass"
]
| [
"0.7025989",
"0.66307485",
"0.6243633",
"0.62221265",
"0.61730474",
"0.6141893",
"0.6110971",
"0.6082119",
"0.6014006",
"0.597867",
"0.5966083",
"0.5900256",
"0.58493364",
"0.58166784",
"0.5808444",
"0.5784192",
"0.57757366",
"0.5770922",
"0.57061917",
"0.5692397",
"0.5661966",
"0.5658837",
"0.56425744",
"0.56207746",
"0.56055427",
"0.55973905",
"0.5544684",
"0.55414116",
"0.55412406",
"0.55210584"
]
| 0.831663 | 0 |
Function to remove curriculum topics from the db | def remove_curriculum_topics(self, curriculum):
DELETE_FROM_CURRICULUM_TOPICS = """DELETE FROM CurriculumTopics WHERE curriculum_name = %s"""
try:
self.db_cursor.execute(DELETE_FROM_CURRICULUM_TOPICS, (curriculum.name,))
self.db_connection.commit()
except:
logging.warning("DBAdapter: Error- Could not delete curriculum topics.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_topics(self, course):\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()",
"def remove_topic ( topics , level = ROOT.RooFit.INFO , stream = -1 ) :\n return Ostap.Utils.RemoveTopic ( topics , level , stream )",
"def delete_topic():\n return dict()",
"def remove_topics(self, project: str, *topics: str):\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {\n 'url': project\n },\n {\n '$pull': {\n 'topics': {\n '$in': topics,\n }\n }\n }\n )",
"def wipe_all_topics(self):\n # doc_count = self.posts_read.find({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}}).count()\n doc_count = self.posts_write.update({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}},\n {'$unset':{'postwise.topic_distro':True,'postwise.topic_assignment':True}}, multi=True)\n\n print 'wiped topics from %i documents' % doc_count['nModified']",
"def remove():\n\n db_remove()",
"def remove(cls, callback, topic):\n\t\tkey_name = cls.create_key_name(callback, topic)\n\t\tdef txn():\n\t\t\tsub = cls.get_by_key_name(key_name)\n\t\t\tif sub is not None:\n\t\t\t\tsub.delete()\n\t\t\t\treturn True\n\t\t\treturn False\n\t\treturn db.run_in_transaction(txn)",
"def test_delete_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_courses(topic_id, course_id)",
"def remove(request, word_to_remove):\n\n word_object = Word.objects.get(word__exact=word_to_remove)\n word_to_learn = WordsToLearn.objects.filter(\n user__id=request.user.id, word=word_object)\n word_to_learn.delete()\n return HttpResponseRedirect('/study')",
"def cleartopics(self):\n\n # Clear previous topics, if any\n if self.topics:\n for uid in self.scan():\n self.removeattribute(uid, \"topic\")\n self.removeattribute(uid, \"topicrank\")\n\n if self.categories:\n self.removeattribute(uid, \"category\")\n\n self.topics, self.categories = None, None",
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def cleanup(self):\n # Removing the ROS system wide advert about which topic are interfaced with this process\n # TODO : lock this for concurrent access\n if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])\n if_topics.remove(self.fullname)\n rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics)\n\n # cleanup pub and sub, so we can go through another create / remove cycle properly\n self._remove_pub(self.pub)\n self._remove_sub(self.sub)",
"def remove():",
"def remove_curriculum(self, curriculum):\n DELETE_CURRICULUM = \"\"\"DELETE FROM Curriculum WHERE name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum.\")",
"def test_topic_delete(topic):\n assert topic.user.post_count == 1\n assert topic.post_count == 1\n assert topic.forum.topic_count == 1\n assert topic.forum.post_count == 1\n\n topic.delete()\n\n forum = Forum.query.filter_by(id=topic.forum_id).first()\n user = User.query.filter_by(id=topic.user_id).first()\n topic = Topic.query.filter_by(id=topic.id).first()\n\n assert topic is None\n assert user.post_count == 0\n assert forum.topic_count == 0\n assert forum.post_count == 0\n assert forum.last_post_id is None",
"def suppress_topics ( *topics ) :\n if topics and 1 == len( topics ) :\n t = str ( topics [ 0 ] ).lower()\n if 'config' == t : return suppress_topics() \n\n if not topics :\n newtopics = [] \n import ostap.core.config as CONFIG\n if 'RooFit' in CONFIG.config :\n import string\n ws = string.whitespace \n node = CONFIG.config [ 'RooFit' ]\n data = node.get('RemoveTopics','(,)' )\n topics = tuple ( i.strip ( ws ) for i in data.split ( ',' ) if i.strip ( ws ) ) \n \n if topics : \n svc = ROOT.RooMsgService.instance()\n svc.saveState () \n topic = msg_topic ( *topics ) \n num = svc.numStreams()\n for i in range ( num ) : ok = Ostap.Utils.remove_topic ( i , topic )",
"def deleteTopic():\n\n data = request.json\n if \"agenda_id\" in data and \"section_position\" in data and \"topic_position\" in data:\n if connectMongo.getAgendaById(data.get(\"agenda_id\")).found:\n responseWrapper: ResponseWrapper = connectMongo.deleteTopic(data.get(\"agenda_id\"),\n data.get(\"section_position\"),\n data.get(\"topic_position\"))\n if responseWrapper.operationDone:\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n return jsonify(response=501, msg=\"Delete Failed\")\n else:\n return jsonify(response=404, msg=\"Agenda not found\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")",
"def test_unsubscribe_from_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.unsubscribe_from_topic_courses(topic_id, course_id)",
"def delete_old_news():\n # Configure the connection to the database\n client = MongoClient(os.environ['MongoDB_URI'])\n # client = MongoClient('localhost', 27017)\n db = client['kenya-news'] # Select the database\n collection = db.news\n time_boundary = datetime.now() - timedelta(hours=48)\n print(time_boundary.isoformat())\n collection.remove({'$or': [\n {'date': {'$lt': time_boundary.isoformat()}},\n {'date': {'$eq': 0}}\n ]})",
"def delete():\n id = request.data\n # Build a pymongo command to delete the document by _id. Only executes if active is set to True.\n active = True\n mode = request.headers[\"mode\"]\n client = MongoClient(db_config)\n if active == True:\n # Switch mode\n if request.headers[\"mode\"] == \"deleteCollectionNode\":\n db = client['Corpus']\n node = db['Corpus']\n # elif request.headers[\"mode\"] == \"something else:\n # db = client['Something']\n # node = db['Something']\n else:\n db = client['Publications']\n node = db['Publications']\n node.remove({\"_id\": id})\n # Return the Ajax response\n return \"Success.\"",
"def validate_new_curriculum_topics(self, curriculum_topics):\n\n for cur in curriculum_topics:\n # check to make sure its in the general topics table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Topic WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"topic does not exist, we must create new one or cancel\") # todo\n\n return True",
"def removeall(subdomain):\n\tTarget.query.filter(Target.subdomain.like(f\"%{subdomain}%\")).delete(synchronize_session='fetch')\n\tdb.session.commit()\n\tprint(\"deleted\",sub)",
"def deleteTopic(self, topic):\n self.deleteTopics((topic,))",
"def clean_test_topics(prefix=TEST_NAME_PREFIX, region_name=None):\n sns = boto3.resource('sns', region_name=region_name)\n num_topics = 0\n try:\n for topic in sns.topics.all():\n if re.match(r'.+%s\\d+' % TEST_NAME_PREFIX, topic.arn):\n topic.delete()\n num_topics += 1\n finally:\n log.info('deleted %s test topics' % num_topics)",
"def delete():",
"def clear_subjects(db):\n\t\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tp.subject = None",
"def deleting_old_news() -> None:\n\n with app.app_context():\n delete_news_from_db()",
"def solr_delete(instances):\n __solr_prepare(instances)",
"def test_forum_delete_with_user_and_topic(topic, user):\n assert user.post_count == 1\n\n topic.forum.delete([user])\n\n forum = Forum.query.filter_by(id=topic.forum_id).first()\n\n assert forum is None\n\n assert user.post_count == 0",
"def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)"
]
| [
"0.6923057",
"0.67551494",
"0.6577512",
"0.6558305",
"0.61510783",
"0.61396134",
"0.60660744",
"0.6024087",
"0.59879607",
"0.5971833",
"0.5942124",
"0.58776575",
"0.5843289",
"0.58229035",
"0.5818467",
"0.58093405",
"0.5767144",
"0.57314086",
"0.5695167",
"0.5674001",
"0.56649786",
"0.5664592",
"0.5641208",
"0.56401306",
"0.55970144",
"0.55910903",
"0.5564134",
"0.5551615",
"0.55443215",
"0.5483354"
]
| 0.82465684 | 0 |
Function to remove curriculum courses from the db | def remove_curriculum_courses(self, curriculum):
DELETE_FROM_CURRICULUM_LISTINGS = """DELETE FROM CurriculumListings WHERE curriculum_name = %s"""
try:
self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))
self.db_connection.commit()
except:
logging.warning("DBAdapter: Error- Could not delete curriculum courses.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove_curriculum(self, curriculum):\n DELETE_CURRICULUM = \"\"\"DELETE FROM Curriculum WHERE name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum.\")",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def remove_empty_courses(self):\n pass",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def remove_courses(self, *course_keys):\r\n entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)\r\n entries.delete()\r\n if hasattr(self.user, '_roles'):\r\n del self.user._roles",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def remove_curriculum_topics(self, curriculum):\n DELETE_FROM_CURRICULUM_TOPICS = \"\"\"DELETE FROM CurriculumTopics WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_TOPICS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum topics.\")",
"def remove_curriculum_goals(self, curriculum):\n DELETE_CURRICULUM_GOALS = \"\"\"DELETE FROM Goal WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM_GOALS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum goals.\")",
"def remove_course(self, term, schedule, crn):\n query = {'Term': term.code,\n 'Schedule': schedule,\n 'CourseID': crn,\n 'ShowDebug': 0,\n '_': int(float(time.time()) * 10**3)}\n\n self.get(self.REMOVE_COURSE_ENDPOINT, params=query)",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def remove_course(self, key: str):\n if key in self._courses:\n self._total_load -= self._courses[key].credit_load\n self._total_diff -= self._courses[key].difficulty\n del self._courses[key]\n if len(self._courses) > 0:\n self._diff_rating = self._total_diff / len(self._courses)\n else:\n self._diff_rating = 0.0\n return True\n return False",
"def _rm_edx4edx(self):\r\n def_ms = modulestore()\r\n course_path = '{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR))\r\n try:\r\n # using XML store\r\n course = def_ms.courses.get(course_path, None)\r\n except AttributeError:\r\n # Using mongo store\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n\r\n # Delete git loaded course\r\n response = self.client.post(\r\n reverse('sysadmin_courses'),\r\n {\r\n 'course_id': course.id.to_deprecated_string(),\r\n 'action': 'del_course',\r\n }\r\n )\r\n self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))\r\n\r\n return response",
"def remove_course_in_section_grades(self, course):\n DELETE_COURSE_SECTION_GRADES = \"\"\"DELETE FROM SectionGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_course_goals(self, course):\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()",
"def delete_course(self, course_key, user_id=None):\r\n index = self.db_connection.get_course_index(course_key)\r\n if index is None:\r\n raise ItemNotFoundError(course_key)\r\n # this is the only real delete in the system. should it do something else?\r\n log.info(u\"deleting course from split-mongo: %s\", course_key)\r\n self.db_connection.delete_course_index(index)",
"def test_delete_entry_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_courses(id, topic_id, course_id)",
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove():\n\n db_remove()",
"def test_mongo_course_add_delete(self):\r\n\r\n self._setstaff_login()\r\n self._mkdir(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n def_ms = modulestore()\r\n self.assertFalse(isinstance(def_ms, XMLModuleStore))\r\n\r\n self._add_edx4edx()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n self.assertIsNotNone(course)\r\n\r\n self._rm_edx4edx()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n self.assertIsNone(course)",
"def test_delete_grading_period_courses(self):\r\n course_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_courses(id, course_id)",
"def remove_course_topics(self, course):\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()",
"def create_course_for_deletion(self):\r\n course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')\r\n root = course.location.version_agnostic().for_branch('draft')\r\n for _ in range(4):\r\n self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])\r\n return modulestore().get_item(root)",
"def delete_course(self, course_key, user_id): # lint-amnesty, pylint: disable=arguments-differ\n # this is the only real delete in the system. should it do something else?\n log.info(\"deleting course from split-mongo: %s\", course_key)\n self.delete_course_index(course_key)\n\n # We do NOT call the super class here since we need to keep the assets\n # in case the course is later restored.\n # super(SplitMongoModuleStore, self).delete_course(course_key, user_id)\n\n self._emit_course_deleted_signal(course_key)",
"def validate_new_curriculum_courses(self, curriculum_courses):\n\n for cur in curriculum_courses:\n # check to make sure its in the general courses table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Course WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"course does not exist, we must create new one or cancel\") # todo\n\n return True",
"def delete_course(course_id: int, db: Session = Depends(get_db)):\n\n try:\n crud.course.delete(db, obj_id=course_id)\n except Exception as error:\n logger.error(f'{error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')\n\n return Response(status_code=status.HTTP_204_NO_CONTENT)",
"def test_course_delete(self, app, auth):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n app.course.create_course(course_data)\n app.admin.manage_courses()\n app.course.delete_course()\n assert (course_data.short_course_name in app.course.sure_delete()), \\\n \"The course was not deleted!\"",
"def remove_course(self, key: str, index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].remove_course(key)"
]
| [
"0.7996696",
"0.7515396",
"0.736457",
"0.728052",
"0.68510836",
"0.6823539",
"0.6731365",
"0.66936284",
"0.6648279",
"0.66253513",
"0.66170394",
"0.6479724",
"0.6462396",
"0.6457004",
"0.63306725",
"0.6330107",
"0.6312556",
"0.63116014",
"0.62037617",
"0.61183065",
"0.6117102",
"0.61016333",
"0.6097363",
"0.6046386",
"0.6045084",
"0.60334754",
"0.5968658",
"0.59416676",
"0.59400046",
"0.5907929"
]
| 0.83684266 | 0 |
Function to remove curriculum from the db | def remove_curriculum(self, curriculum):
DELETE_CURRICULUM = """DELETE FROM Curriculum WHERE name = %s"""
try:
self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))
self.db_connection.commit()
except:
logging.warning("DBAdapter: Error- Could not delete curriculum.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def remove_curriculum_goals(self, curriculum):\n DELETE_CURRICULUM_GOALS = \"\"\"DELETE FROM Goal WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM_GOALS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum goals.\")",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove():\n\n db_remove()",
"def remove_curriculum_topics(self, curriculum):\n DELETE_FROM_CURRICULUM_TOPICS = \"\"\"DELETE FROM CurriculumTopics WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_TOPICS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum topics.\")",
"def remove():",
"def delete_this_region(self):",
"def remove(self):",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_data(self):\n db.session.delete(self)\n db.session.commit( )",
"def remove():\n pass",
"def remove(self):\n db.session.delete(self)\n db.session.commit()",
"def remove(name):\n del person_database[name]",
"def sr_remove_c():\n req_data = request.get_json()\n logging.debug(\"req_data = \" + str(req_data))\n\n product_name = req_data['product_name']\n version_number = req_data['version_number']\n name = req_data['name']\n version = req_data['version']\n destination = req_data['destination']\n\n outcome = {\"name\": \"Fail\"}\n\n try:\n # create new association\n c_id = Component.query.filter_by(name=name, version=version).first().id\n sr_id = SoftwareRelease.query.filter_by(product_name=product_name, version_number=version_number).first().id\n a = Association.query.filter_by(software_release_id=sr_id, component_id=c_id, destination=destination).first()\n\n db.session.delete(a)\n\n db.session.commit()\n outcome['name'] = \"Success\"\n except:\n db.session.rollback()\n raise\n finally:\n db.session.close()\n return jsonify(outcome)",
"def del_car(matricula):\n global max\n con = lite.connect('parking.db')\n cur = con.cursor()\n if(_formatMatriculaValid(matricula)):\n try:\n cur.execute(\"DELETE FROM cotxes WHERE id_cotxe=?\",(matricula,))\n cur.execute(\"DELETE FROM parking WHERE id_cotxe=?\",(matricula,))\n con.commit()\n max +=1\n except lite.IntegrityError:\n print \"Error.lelele\"\n else:\n print(\"Format matricula invalid.\")\n con.close()",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def remove_current():\n current.remove()",
"def cleanUp(name):\n clovr = pymongo.Connection().clovr\n clovr.clusters.remove(dict(name=name))",
"def delete():",
"def deletePlayers():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM players\") \n db.commit() \n db.close()",
"def delete_row():\r\n if request.method == 'POST':\r\n d = json.loads(request.data)\r\n data = (d['sr'],)\r\n g.db.execute(\"DELETE FROM monthly_data WHERE Sr = %s\", data)\r\n mysql.connection.commit()\r\n return d\r\n else:\r\n pass",
"def remove(name):",
"def remove(self):\n traci.vehicle.remove(self.id)",
"def remove(self):\n traci.vehicle.remove(self.id)",
"def _remove(self):\n pass",
"def removeNextPose():\n\n data = request.get_json() # data = {'pose_id': 2, 'nextposeid': 12, 'weight': 1}\n print(data)\n if data['nextposeid']:\n pose = Pose.query.get(data['pose_id'])\n next_poses = pose.next_poses\n del next_poses[data['nextposeid']]\n flag_modified(pose, 'next_poses')\n db.session.commit()\n\n url = '/pose/' + str(poseid)\n return redirect(url)",
"def remove_skill_from_database(skillpath):\n to_remove = database_controller.get_subcategories(skillpath)\n subcategories_to_check = to_remove.copy()\n to_remove.append(skillpath)\n while subcategories_to_check:\n new_subcategories = database_controller.get_subcategories(subcategories_to_check.pop())\n to_remove.extend(new_subcategories)\n subcategories_to_check.extend(new_subcategories)\n for sub_path in reversed(to_remove):\n sid = database_controller.get_skill(sub_path).id\n Hierarchy.query.filter_by(parent_skill_id=sid).delete()\n Hierarchy.query.filter_by(child_skill_id=sid).delete()\n MilestoneAssociation.query.filter_by(milestone_skill_id=sid).delete()\n Association.query.filter_by(skill_id=sid).delete()\n # duplicate names WILL get removed here\n Skill.query.filter_by(path=sub_path).delete()\n db.session.commit()",
"def test_delete(self):\n c = city.City(name=\"Freiburg\")\n p1 = city.Citizen(name=\"Peter\")\n p2 = city.Citizen(name=\"Georg\")\n p3 = city.Citizen(name=\"Hans\")\n c.add(p1, p2, p3, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n cw = wrapper.add(c)\n session.commit()\n\n cw.remove(p3.uid)\n session.prune()\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)",
"def delete_salary_group(db:Session):\n pass",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")"
]
| [
"0.7520537",
"0.7123004",
"0.69520813",
"0.64106256",
"0.6309485",
"0.60888255",
"0.5906614",
"0.58402205",
"0.5753388",
"0.56972307",
"0.5633587",
"0.55964035",
"0.559353",
"0.55837995",
"0.55369216",
"0.5527404",
"0.549224",
"0.54792136",
"0.5443087",
"0.53939295",
"0.5383188",
"0.5376762",
"0.5354552",
"0.5354552",
"0.5351613",
"0.53412026",
"0.53305495",
"0.53206706",
"0.53132313",
"0.5305753"
]
| 0.84933627 | 0 |
Function to edit a course in the db | def edit_course(self, course):
        # Update the course's base record.
        EDIT_COURSE = """UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s"""
        self.db_cursor.execute(EDIT_COURSE, (
            course.subject_code, course.credit_hours, course.description, course.name))
        self.db_connection.commit()
        # Replace the course's topic listings.
        DELETE_COURSE_TOPICS = """DELETE FROM CourseTopics WHERE course_name = %s"""
        self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))
        self.db_connection.commit()
        INSERT_COURSE_TOPICS = """INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)"""
        for ct in course.topics:
            self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name, ct))
            self.db_connection.commit()
        # Replace the course's goal listings.
        DELETE_COURSE_GOALS = """DELETE FROM CourseGoals WHERE course_name = %s"""
        self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))
        self.db_connection.commit()
        INSERT_COURSE_GOALS = """INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)"""
        for cg in course.goals:
            self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))
            self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(request):\n\tcourse_id = request.GET.get('course_id')\n\tif request.method == 'POST':\n\t\tcourse_title = request.POST['course_title']\n\t\tinstitute_name = request.POST['institute_name']\n\t\tcourse_desc = request.POST['course_desc']\n\t\tcurrent_data = Course.objects.get(course_id = course_id)\n\t\tcurrent_data.course_title = course_title\n\t\tcurrent_data.institute_name = institute_name\n\t\tcurrent_data.course_desc = course_desc\n\t\tcurrent_data.save()\n\t\treturn HttpResponseRedirect(reverse('courseapp:index'))\n\tdata = Course.objects.get(course_id = course_id)\n\treturn render(request,'update.html',{'data':data})",
"def partial_update_course(course_id: int, course_request: schemas.CourseRequestPartial, db: Session = Depends(get_db)):\n\n course = crud.course.get(db, obj_id=course_id)\n\n if not course:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Course {course_id} not found')\n\n try:\n return crud.course.update(db, db_obj=course, obj_in=course_request)\n except Exception as error:\n logger.error(f'{error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')",
"def update_course(self):\n # ensure that updating course is exists\n if self.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n\n # ensuring that user does not provided less number of limited places\n if db[\"courses\"][crs_i][\"total_place\"] > self._total_place:\n print(\"{} course's limited places number must be more than {}\".format(\n self._course_name,\n db[\"courses\"][crs_i][\"total_place\"]\n ))\n return\n\n db[\"courses\"][crs_i][\"teacher\"] = self._teacher\n db[\"courses\"][crs_i][\"total_place\"] = self._total_place\n break\n self._file.write_db(db)\n print(\"The course - {} is updated\".format(self._course_name))\n return self.get_course().course_info()",
"def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return redirect(url_for('show_entries_admin'))",
"def edit(self, **kwargs):\n ...",
"def edit():",
"def update_course(course):\r\n store = editable_modulestore()\r\n store.update_item(course, '**replace_user**')\r\n updated_course = store.get_course(course.id)\r\n return updated_course",
"def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)",
"def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)",
"def edit(self):\n\n pass",
"def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)",
"def edit_cert(cert_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n cert = Certs.query.get_or_404(cert_id)\n form = Cert_Form(obj=cert)\n\n if form.validate_on_submit():\n cert.cert_name = form.cert_name.data\n cert.hours = form.hours.data\n cert.is_required = form.is_required.data\n cert.expire = form.expire.data\n cert.good_for_time = form.good_for_time.data\n cert.good_for_unit = form.good_for_unit.data\n \n\n db.session.commit()\n flash(f\"{cert.cert_name} has been updated\")\n return redirect(\"/administrator\")\n\n return render_template(\"/admin/edit_cert.html\", form=form, cert = cert)",
"def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))",
"def test_update_entry_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def edit_subject(request,subject_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.subject_permit:\n\t\traise Http404\n\tsubject = models.Subject.objects.filter(\n\t\tpk=subject_id, soft_delete=False\n\t).first()\n\tif not subject:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"subject_types\": context_helper.subject_type_helper(),\n\t\t'subject_id': subject_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tcourse = request.POST.get('course_picker')\n\t\tname = request.POST.get('sname')\n\t\tsid = request.POST.get('sid')\n\t\tstype = request.POST.get('subject_picker')\n\t\tmaxmarks = request.POST.get('marks')\n\t\ttry:\n\t\t\tif str(subject.course.pk) != str(course):\n\t\t\t\tsubject.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif subject.s_type != stype:\n\t\t\t\tsubject.s_type = stype\n\t\t\t\tupdate_fields.append('s_type')\n\t\t\t\tactivity += 'Changed subject type to ' + str(stype) + '.\\n'\n\t\t\tif subject.name != name:\n\t\t\t\tsubject.name = name\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed subject name to' + str(name) + '.\\n'\n\t\t\tif subject.s_id != sid:\n\t\t\t\tsubject.s_id = sid\n\t\t\t\tupdate_fields.append('s_id')\n\t\t\t\tactivity += 'Changed subject ID to' + str(sid) + '.\\n'\n\t\t\tif subject.max_marks != maxmarks:\n\t\t\t\tsubject.max_marks = maxmarks\n\t\t\t\tupdate_fields.append('max_marks')\n\t\t\t\tactivity += 'Changed maximum marks to' + str(maxmarks) + '.\\n'\n\t\t\tsubject.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit subject\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_subject_info(subject))\n\tfor i in context_dict['courses']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['subject_type']:\n\t\ttry: context_dict['subject_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-subjects')\n\treturn render(\n\t\trequest, \"editSubject.html\", context_dict\n\t)",
"def edit_training(training_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n training = Training.query.get_or_404(training_id)\n form = Training_Form(obj = training)\n\n if form.validate_on_submit():\n training.name = form.name.data,\n training.city = form.city.data,\n training.state = form.state.data,\n training.room = form.room.data,\n training.hours = form.hours.data\n\n db.session.commit()\n flash(f\"{training.name} has been updated\")\n return redirect(\"/administrator\")\n\n else:\n return render_template(\"/admin/edit_training.html\", form = form, training = training)",
"def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True",
"def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n SET course_id = ?, course_section_id = ?\n WHERE student_id = ?\n (?,?,?)\"\"\",\n (course_id, course_section_id, student_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1",
"def edit_current_note():\n note_id = request.form.get(\"note_id\")\n\n edited_note = Note.query.get(note_id)\n\n edited_note.title_note = request.form.get(\"title\")\n edited_note.note = request.form.get(\"note\")\n\n\n db.session.commit()\n \n return \"note edited\"",
"def edit(tesserae, tessera_id):\n try:\n return tesserae.edit(tessera_id)\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\", str(e))\n return False",
"def careerCatagory_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n page_index = tool.get_param_by_request(request.GET, 'page_index', 1, int)\r\n\r\n careerCatagory = None\r\n if action == \"edit\" or action == \"show\":\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n careerCatagory = api_careerCatagory.get_career_catagory_by_id(_id)\r\n\r\n if careerCatagory.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n\r\n careerCatagory = careerCatagory.result()[0]\r\n\r\n c = {\"careerCatagory\": careerCatagory, \"action\": action, \"page_index\": page_index}\r\n\r\n return render_to_response(\"mz_course/careerCatagory_save.html\", c, context_instance=RequestContext(request))",
"def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'import-course', {'key': None}):\n return\n\n if not CourseOutlineRights.can_edit(self):\n transforms.send_json_response(self, 401, 'Access denied.', {})\n return\n\n payload = request.get('payload')\n course_raw = transforms.json_to_dict(\n transforms.loads(payload), self.SCHEMA_DICT)['course']\n\n source = None\n for acourse in sites.get_all_courses():\n if acourse.raw == course_raw:\n source = acourse\n break\n\n if not source:\n transforms.send_json_response(\n self, 404, 'Object not found.', {'raw': course_raw})\n return\n\n course = courses.Course(self)\n errors = []\n try:\n course.import_from(source, errors)\n except Exception as e: # pylint: disable-msg=broad-except\n logging.exception(e)\n errors.append('Import failed: %s' % e)\n\n if errors:\n transforms.send_json_response(self, 412, '\\n'.join(errors))\n return\n\n course.save()\n transforms.send_json_response(self, 200, 'Imported.')",
"def edit_recipe(description):\n session['description']=description\n if request.method == 'POST':\n des_result=(USERS[session['username']].recipe_category[session['current_recipe_category_title']].\n update_description(session['description'], request.form['description']))\n status_result=(USERS[session['username']].recipe_category[session['current_recipe_category_title']].\n update_status(session['description'], request.form['status']))\n if des_result == 'recipe updated' or status_result == 'recipe updated':\n flash('recipe updated', 'info')\n else:\n flash(des_result, 'warning')\n return redirect(url_for('edit_recipe', recipe_category_title=session['current_recipe_category_title']))\n return render_template('edit_recipe.html', item=USERS[session['username']]\n .recipe_category[session['current_recipe_category_title']].recipes[description],\n recipes=USERS[session['username']].\n recipe_category[session['current_recipe_category_title']].recipes)",
"def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))",
"def editConcept(self):\n if self.concept_list.currentIndex().isValid():\n concept = self.concept_list.selectedItems()[0].data(Qt.UserRole)[0]\n subcategory = self.concept_list.selectedItems()[0].data(Qt.UserRole)[1]\n dlg = EditConceptDialog(self, concept, subcategory)\n if dlg.exec_():\n concept, subcategory = dlg.getValue()\n self.db.update_concept(concept)\n self.db.update_subcategory(subcategory)\n self.search()",
"def edit_form():\n return template (\"edit\")",
"def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))",
"def edit_student(request, student_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tstudent = models.Student.objects.filter(\n\t\tpk=student_id, soft_delete=False\n\t).first()\n\tif not student:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_types\": context_helper.gender_helper(),\n\t\t'student_id': student_id\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\taddress_flag = request.POST.get('address_flag')\n\t\tprint (address_flag)\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\t\tupdate_fields.append('photo')\n\t\t\t\tactivity += 'Changed photo.\\n'\n\t\t\tif student.name != sname:\n\t\t\t\tstudent.name = sname\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed name to '+ str(sname) +'.\\n'\n\t\t\tif student.roll_no != roll:\n\t\t\t\tstudent.roll_no = roll\n\t\t\t\tupdate_fields.append('roll_no')\n\t\t\t\tactivity += 'Changed roll number to '+ str(roll) +'.\\n'\n\t\t\tif str(student.dob) != str(dob):\n\t\t\t\tstudent.dob = dob\n\t\t\t\tupdate_fields.append('dob')\n\t\t\t\tactivity += 'Changed DOB to ' + str(dob) + '.\\n'\n\t\t\tif student.gender != gender:\n\t\t\t\tstudent.gender = gender\n\t\t\t\tupdate_fields.append('gender')\n\t\t\t\tactivity += 'Changed gender to ' + str(gender) + '.\\n'\n\t\t\tif student.blood_group != bgroup:\n\t\t\t\tstudent.blood_group = bgroup\n\t\t\t\tupdate_fields.append('blood_group')\n\t\t\t\tactivity += 'Changed blood group to ' + str(bgroup) + '.\\n'\n\t\t\tif student.phone != phone:\n\t\t\t\tstudent.phone = phone\n\t\t\t\tupdate_fields.append('phone')\n\t\t\t\tactivity += 'Changed phone number to ' + str(phone) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.perm_address != permadd:\n\t\t\t\tstudent.perm_address = permadd\n\t\t\t\tupdate_fields.append('perm_address')\n\t\t\t\tactivity += 'Changed permanent address to ' + str(permadd) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.guardian_name != gname:\n\t\t\t\tstudent.guardian_name = gname\n\t\t\t\tupdate_fields.append('guardian_name')\n\t\t\t\tactivity += 'Changed current address to ' + str(gname) + '.\\n'\n\t\t\tif 
student.guardian_phone != gphone:\n\t\t\t\tstudent.guardian_phone = gphone\n\t\t\t\tupdate_fields.append('guardian_phone')\n\t\t\t\tactivity += 'Changed guardian phone to ' + str(gphone) + '.\\n'\n\t\t\tif student.guardian_type != gtype:\n\t\t\t\tstudent.guardian_type = gtype\n\t\t\t\tupdate_fields.append('guardian_type')\n\t\t\t\tactivity += 'Changed current address to ' + str(gtype) + '.\\n'\n\t\t\tif str(student.course.pk) != str(course):\n\t\t\t\tstudent.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif student.batch != batch:\n\t\t\t\tstudent.batch = batch\n\t\t\t\tupdate_fields.append('batch')\n\t\t\t\tactivity += 'Changed batch to' + str(batch) + '.\\n'\n\t\t\tif student.email != email:\n\t\t\t\tstudent.email = email\n\t\t\t\tupdate_fields.append('email')\n\t\t\t\tactivity += 'Changed email to ' + str(email) + '.\\n'\n\t\t\tif student.address_flag != address_flag:\n\t\t\t\tstudent.address_flag = address_flag\n\t\t\t\tupdate_fields.append('address_flag')\n\t\t\t\tactivity += 'Changed address flag.'\n\t\t\tstudent.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_student_info(student))\n\tif type(context_dict['dob']) == str:\n\t\tcontext_dict['dob'] = datetime.strptime(context_dict['dob'], '%Y-%m-%d')\n\tfor i in context_dict['course']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['blood_group']:\n\t\ttry: context_dict['blood_groups'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['guardian_type']:\n\t\ttry: context_dict['guardian_types'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['gender_type']:\n\t\ttry: context_dict['gender_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-students')\n\treturn render(\n\t\trequest, \"editStudent.html\", context_dict\n\t)",
"def edit(self, id, *args, **kw):\n atras = \"/rolesplantilla/\"\n if (not kw['contexto']):\n redirect('../')\n elif (kw['contexto'] == \"proyecto\"):\n selector = SelectorPermisosPlantillaProy\n elif (kw['contexto'] == \"fase\"):\n selector = SelectorPermisosPlantillaFase\n elif (kw['contexto'] == \"ti\"):\n kw[\"contexto\"] = u\"Tipo de Ítem\"\n selector = SelectorPermisosPlantillaTi\n \n self.edit_form = RolPlantillaEditForm(DBS=DBSession, selector=selector) \n tmpl_context.widget = self.edit_form\n rol_plantilla_edit_form = self.edit_form\n \n \n page=u\"Editar Rol Plantilla de {contexto}\".format(contexto=kw['contexto'])\n \n value = self.edit_filler.get_value(values={'id_rol': int(id)})\n \n #agregado\n if value[\"tipo\"].find(\"Plantilla\") < 0:\n page=u\"Editar Rol de {contexto}\".format(contexto=kw['contexto'])\n atras = \"/roles/\"\n \n return dict(value=value, page=page, atras=atras)",
"def edit(self, *args, **kw):\n pp = PoseePermiso('modificar rol')\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value(values={'id_rol': int(args[0])})\n page = \"Rol {nombre}\".format(nombre=value[\"nombre_rol\"])\n atras = self.action\n return dict(value=value, page=page, atras=atras)"
]
| [
"0.75889885",
"0.6766243",
"0.66960055",
"0.6695456",
"0.66872954",
"0.6605374",
"0.6586517",
"0.6531737",
"0.6449789",
"0.6433642",
"0.64253557",
"0.6294824",
"0.6264496",
"0.62369275",
"0.6212748",
"0.6182824",
"0.616313",
"0.61455786",
"0.61435974",
"0.6114365",
"0.6085732",
"0.60855347",
"0.60720515",
"0.60660684",
"0.60396767",
"0.60174745",
"0.60031015",
"0.60012275",
"0.59950644",
"0.59810424"
]
| 0.80736226 | 0 |
Function to remove course goals from the db | def remove_course_goals(self, course):
DELETE_COURSE_GOALS = """DELETE FROM CourseGoals WHERE course_name = %s"""
self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_curriculum_goals(self, curriculum):\n DELETE_CURRICULUM_GOALS = \"\"\"DELETE FROM Goal WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM_GOALS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum goals.\")",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def del_done():\n # This function works just like the deleting function\n c.execute(\"DELETE FROM activities WHERE status = 'done' AND Frequency != 'correct'\")\n conn.commit()",
"def test_delete_goal(self):\n pass",
"def remove_empty_courses(self):\n pass",
"def test_deleting_goal(self):\n\n delete_goal(1)\n self.assertIsNone(Goal.query.get(1))",
"def clean_exam():\n data = Exam.objects.all()\n data.delete()",
"def delete_goal(self, task_name):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"DELETE FROM goals WHERE task = ?\"\"\", (task_name,))\r\n\t\treturn self.c.rowcount",
"def remove_course(self, key: str):\n if key in self._courses:\n self._total_load -= self._courses[key].credit_load\n self._total_diff -= self._courses[key].difficulty\n del self._courses[key]\n if len(self._courses) > 0:\n self._diff_rating = self._total_diff / len(self._courses)\n else:\n self._diff_rating = 0.0\n return True\n return False",
"def remove_course_in_section_grades(self, course):\n DELETE_COURSE_SECTION_GRADES = \"\"\"DELETE FROM SectionGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def userstory_eliminar(request,id_proyecto, id_userstory):\n #El sistema permitira la eliminacion de User Story solo si el mismo se en-\n #cuentra dentro del Backlog.\n #No se eliminael US si esta Resuelta\n band=False\n\n rol_en_proyecto=Equipo.objects.get(usuario_id=request.user.pk, proyecto_id=id_proyecto)\n rol = Group.objects.get(id=rol_en_proyecto.rol.pk)\n user_permissions_groups = list(rol.permissions.all())\n\n for p in user_permissions_groups:\n if (p.codename == 'delete_userstory'):\n band = True\n\n if (band == True):\n userstoryDelLogic = Userstory.objects.get(pk=id_userstory)\n\n if ((userstoryDelLogic.estado == \"Nueva\") or (userstoryDelLogic.estado == \"InPlanning\") or (userstoryDelLogic.estado == \"EnCurso\") or (userstoryDelLogic.estado == \"Comentarios\")):\n userstoryDelLogic.activo=False\n userstoryDelLogic.save()\n return HttpResponseRedirect('/proyectos/')\n else:\n raise Http404(\"No cuenta con los permisos necesarios\")",
"def remove():\n\n db_remove()",
"def test_delete_grading_period_courses(self):\r\n course_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_courses(id, course_id)",
"def delete_course_index(self, course_index):\r\n return self.course_index.remove(son.SON([('org', course_index['org']), ('offering', course_index['offering'])]))",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def remove():",
"def delete_savings_goal():\n\n current_goal = SavingsGoal.query.filter(\n SavingsGoal.user_id == str(current_user.id)).one_or_none()\n\n db.session.delete(current_goal)\n db.session.commit()\n\n flash('Savings goal was successfully deleted', 'warning')\n return redirect(url_for('savings.savings_display'))",
"def remove(self):",
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def deleteNewActivityAssistant(courseID):\n c = Course.objects.get(courseId=courseID)\n try:\n assistant = NewActivityCreated.objects.get(course_id=c.id)\n assistant.delete()\n return assistant\n except Exception:\n return None",
"def test_deleting_patient_goals(self):\n\n data = {\"goal\": 1}\n result = self.client.post(\"/delete-goal\", data=data)\n goal = Goal.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(goal)",
"def remove_tactic(self):\n tactic_removed = input(\"Enter a tactic to be removed: \")\n self.proof.tactics.remove(tactic_removed)\n for gene in self.population:\n gene.chromosome = [e for e in gene.chromosome if e != tactic_removed]",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def remove(self, name):\r\n goals = self.goals()\r\n for goal in goals:\r\n if goal.name == name:\r\n goals.remove(goal)\r\n return self\r\n raise GoalError('Goal %s does not exist in this phase, members are: %s' % (name, goals))",
"def delete_all(self):\n models.CourseLearningOutcome.objects.all().delete()\n #models.CoreLearningOutcome.objects.all().delete()\n #models.CreditType.objects.all().delete()\n models.Course.objects.all().delete()\n models.DegreeProgram.objects.all().delete()\n models.DPCourseSpecific.objects.all().delete()\n models.DPCourseGeneric.objects.all().delete()\n models.DPCourseSubstituteSpecific.objects.all().delete()\n models.DPCourseSubstituteGeneric.objects.all().delete()"
]
| [
"0.73404795",
"0.6837276",
"0.6595358",
"0.64684933",
"0.64334375",
"0.641285",
"0.63084304",
"0.61660075",
"0.6162019",
"0.6108261",
"0.60963815",
"0.6087424",
"0.6062167",
"0.606025",
"0.59971017",
"0.59696966",
"0.5940118",
"0.5929227",
"0.59132004",
"0.59087265",
"0.58900225",
"0.58034146",
"0.5765623",
"0.576009",
"0.574638",
"0.573767",
"0.5723466",
"0.56959915",
"0.56826395",
"0.5664835"
]
| 0.8100842 | 0 |
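Every positive document in the rows above follows the same parameterized-DELETE-then-commit pattern: build a `DELETE ... WHERE <key> = %s` statement, execute it with the key bound as a parameter, and commit on the shared connection. The sketch below is a minimal, self-contained illustration of that pattern only; it uses Python's stdlib sqlite3 (so `?` placeholders instead of the `%s` style in the rows), and the `GoalStore` class and `CourseGoals` table are hypothetical names chosen for the example, not part of the dataset.

import sqlite3


class GoalStore:
    """Minimal stand-in for the adapter classes shown in the rows above."""

    def __init__(self, connection):
        self.db_connection = connection
        self.db_cursor = connection.cursor()

    def remove_course_goals(self, course_name):
        # sqlite3 binds with "?"; the snippets above use "%s" (psycopg2/MySQL style).
        self.db_cursor.execute(
            "DELETE FROM CourseGoals WHERE course_name = ?", (course_name,)
        )
        self.db_connection.commit()


if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE CourseGoals (course_name TEXT, goal TEXT)")
    conn.execute("INSERT INTO CourseGoals VALUES ('CS101', 'Understand loops')")
    GoalStore(conn).remove_course_goals("CS101")
    print(conn.execute("SELECT COUNT(*) FROM CourseGoals").fetchone()[0])  # -> 0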
Function to remove course topics from the db | def remove_course_topics(self, course):
DELETE_COURSE_TOPICS = """DELETE FROM CourseTopics WHERE course_name = %s"""
self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_courses(topic_id, course_id)",
"def remove_curriculum_topics(self, curriculum):\n DELETE_FROM_CURRICULUM_TOPICS = \"\"\"DELETE FROM CurriculumTopics WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_TOPICS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum topics.\")",
"def test_unsubscribe_from_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.unsubscribe_from_topic_courses(topic_id, course_id)",
"def test_delete_entry_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_courses(id, topic_id, course_id)",
"def remove_topics(self, project: str, *topics: str):\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {\n 'url': project\n },\n {\n '$pull': {\n 'topics': {\n '$in': topics,\n }\n }\n }\n )",
"def delete_topic():\n return dict()",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def remove_topic ( topics , level = ROOT.RooFit.INFO , stream = -1 ) :\n return Ostap.Utils.RemoveTopic ( topics , level , stream )",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove(cls, callback, topic):\n\t\tkey_name = cls.create_key_name(callback, topic)\n\t\tdef txn():\n\t\t\tsub = cls.get_by_key_name(key_name)\n\t\t\tif sub is not None:\n\t\t\t\tsub.delete()\n\t\t\t\treturn True\n\t\t\treturn False\n\t\treturn db.run_in_transaction(txn)",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def test_forum_unseeding_on_delete(self):\r\n test_course_data = self.assert_created_course(number_suffix=uuid4().hex)\r\n course_id = _get_course_id(test_course_data)\r\n self.assertTrue(are_permissions_roles_seeded(course_id))\r\n delete_course_and_groups(course_id, commit=True)\r\n # should raise an exception for checking permissions on deleted course\r\n with self.assertRaises(ItemNotFoundError):\r\n are_permissions_roles_seeded(course_id)",
"def test_topic_delete(topic):\n assert topic.user.post_count == 1\n assert topic.post_count == 1\n assert topic.forum.topic_count == 1\n assert topic.forum.post_count == 1\n\n topic.delete()\n\n forum = Forum.query.filter_by(id=topic.forum_id).first()\n user = User.query.filter_by(id=topic.user_id).first()\n topic = Topic.query.filter_by(id=topic.id).first()\n\n assert topic is None\n assert user.post_count == 0\n assert forum.topic_count == 0\n assert forum.post_count == 0\n assert forum.last_post_id is None",
"def remove():\n\n db_remove()",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def wipe_all_topics(self):\n # doc_count = self.posts_read.find({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}}).count()\n doc_count = self.posts_write.update({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}},\n {'$unset':{'postwise.topic_distro':True,'postwise.topic_assignment':True}}, multi=True)\n\n print 'wiped topics from %i documents' % doc_count['nModified']",
"def remove(request, word_to_remove):\n\n word_object = Word.objects.get(word__exact=word_to_remove)\n word_to_learn = WordsToLearn.objects.filter(\n user__id=request.user.id, word=word_object)\n word_to_learn.delete()\n return HttpResponseRedirect('/study')",
"def deleting_old_news() -> None:\n\n with app.app_context():\n delete_news_from_db()",
"def test_forum_delete_with_user_and_topic(topic, user):\n assert user.post_count == 1\n\n topic.forum.delete([user])\n\n forum = Forum.query.filter_by(id=topic.forum_id).first()\n\n assert forum is None\n\n assert user.post_count == 0",
"def deleteWord(self,chat_id, index):\n\t\tcommand = \"\"\"DELETE FROM words WHERE ID IN \n\t\t(SELECT words.ID FROM words JOIN courses ON words.course=courses.ID \n\t\t\tWHERE words.ID=? and courses.author_id=?);\"\"\"\n\t\tparams = (index, chat_id,)\n\n\t\tself._run_command(command, params)",
"def create_course_for_deletion(self):\r\n course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')\r\n root = course.location.version_agnostic().for_branch('draft')\r\n for _ in range(4):\r\n self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])\r\n return modulestore().get_item(root)",
"def deleteTopic(self, topic):\n self.deleteTopics((topic,))",
"def delete_topic(request, topic_id, forum_id):\n\tif request.user.is_authenticated() and request.user.is_staff:\n\t\tposts = Post.objects.filter(post_topic=topic_id).count()\n\t\tTopic.objects.get(id=topic_id).delete()\n\t\tPost.objects.filter(post_topic=topic_id).delete()\n\t\tforum = Forum.objects.get(id=forum_id)\n\t\tforum.forum_topics = forum.forum_topics -1\n\t\tforum.forum_posts = forum.forum_posts - posts\n\t\tforum.save()\n\t\treturn HttpResponseRedirect(\"/forum/forum/\" + forum_id +\"/\")\n\telse:\n\t\treturn render_to_response('pages/bug.html', {'bug': _('You aren\\'t a moderator')}, context_instance=RequestContext(request))",
"def DBDeleteLangRecords( lang ):\n log.info(\"Deleting old '%s' records...\", lang)\n return DBExecute(DBConjugations, \"DELETE FROM conjugations WHERE LanguageCode = ?\", lang)",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove_category(teach_id, cat_id):\n query = \"DELETE FROM teacher_categories WHERE teacher_account_id = %s and category_id = %s;\"\n args = (teach_id, cat_id)\n database.connection.save_data(query, args)",
"def deleteTopic():\n\n data = request.json\n if \"agenda_id\" in data and \"section_position\" in data and \"topic_position\" in data:\n if connectMongo.getAgendaById(data.get(\"agenda_id\")).found:\n responseWrapper: ResponseWrapper = connectMongo.deleteTopic(data.get(\"agenda_id\"),\n data.get(\"section_position\"),\n data.get(\"topic_position\"))\n if responseWrapper.operationDone:\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n return jsonify(response=501, msg=\"Delete Failed\")\n else:\n return jsonify(response=404, msg=\"Agenda not found\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")",
"def test_category_delete_with_user(topic):\n user = topic.user\n forum = topic.forum\n category = topic.forum.category\n\n assert user.post_count == 1\n assert forum.post_count == 1\n assert forum.topic_count == 1\n\n category.delete([user])\n\n assert user.post_count == 0\n\n category = Category.query.filter_by(id=category.id).first()\n topic = Topic.query.filter_by(id=topic.id).first()\n\n assert category is None\n # The topic should also be deleted\n assert topic is None",
"def cleartopics(self):\n\n # Clear previous topics, if any\n if self.topics:\n for uid in self.scan():\n self.removeattribute(uid, \"topic\")\n self.removeattribute(uid, \"topicrank\")\n\n if self.categories:\n self.removeattribute(uid, \"category\")\n\n self.topics, self.categories = None, None",
"def clear_subjects(db):\n\t\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tp.subject = None"
]
| [
"0.7267308",
"0.7162471",
"0.6669879",
"0.6615398",
"0.65995437",
"0.65227175",
"0.64070475",
"0.63127166",
"0.6255931",
"0.6247505",
"0.622795",
"0.61358047",
"0.6098038",
"0.60880136",
"0.6079159",
"0.5982441",
"0.5974049",
"0.5922804",
"0.5904841",
"0.5893492",
"0.5891639",
"0.5862065",
"0.5852539",
"0.58361065",
"0.5830961",
"0.58272964",
"0.5822449",
"0.5811404",
"0.5799119",
"0.5794713"
]
| 0.826377 | 0 |
Function to remove courses from a curriculum in the db | def remove_course_in_curriculum_listings(self, course):
DELETE_CURRICULUM_COURSES = """DELETE FROM CurriculumListings WHERE course_name = %s"""
self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def remove_curriculum(self, curriculum):\n DELETE_CURRICULUM = \"\"\"DELETE FROM Curriculum WHERE name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum.\")",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def remove_empty_courses(self):\n pass",
"def remove_courses(self, *course_keys):\r\n entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)\r\n entries.delete()\r\n if hasattr(self.user, '_roles'):\r\n del self.user._roles",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def remove_curriculum_goals(self, curriculum):\n DELETE_CURRICULUM_GOALS = \"\"\"DELETE FROM Goal WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM_GOALS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum goals.\")",
"def remove_course(self, term, schedule, crn):\n query = {'Term': term.code,\n 'Schedule': schedule,\n 'CourseID': crn,\n 'ShowDebug': 0,\n '_': int(float(time.time()) * 10**3)}\n\n self.get(self.REMOVE_COURSE_ENDPOINT, params=query)",
"def remove_course(self, key: str):\n if key in self._courses:\n self._total_load -= self._courses[key].credit_load\n self._total_diff -= self._courses[key].difficulty\n del self._courses[key]\n if len(self._courses) > 0:\n self._diff_rating = self._total_diff / len(self._courses)\n else:\n self._diff_rating = 0.0\n return True\n return False",
"def remove_curriculum_topics(self, curriculum):\n DELETE_FROM_CURRICULUM_TOPICS = \"\"\"DELETE FROM CurriculumTopics WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_TOPICS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum topics.\")",
"def remove_course_in_section_grades(self, course):\n DELETE_COURSE_SECTION_GRADES = \"\"\"DELETE FROM SectionGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))\n self.db_connection.commit()",
"def delete_course(self, course_key, user_id=None):\r\n index = self.db_connection.get_course_index(course_key)\r\n if index is None:\r\n raise ItemNotFoundError(course_key)\r\n # this is the only real delete in the system. should it do something else?\r\n log.info(u\"deleting course from split-mongo: %s\", course_key)\r\n self.db_connection.delete_course_index(index)",
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_course_goals(self, course):\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()",
"def remove_course(self, key: str, index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].remove_course(key)",
"def _rm_edx4edx(self):\r\n def_ms = modulestore()\r\n course_path = '{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR))\r\n try:\r\n # using XML store\r\n course = def_ms.courses.get(course_path, None)\r\n except AttributeError:\r\n # Using mongo store\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n\r\n # Delete git loaded course\r\n response = self.client.post(\r\n reverse('sysadmin_courses'),\r\n {\r\n 'course_id': course.id.to_deprecated_string(),\r\n 'action': 'del_course',\r\n }\r\n )\r\n self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))\r\n\r\n return response",
"def test_delete_entry_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_courses(id, topic_id, course_id)",
"def test_delete_grading_period_courses(self):\r\n course_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_courses(id, course_id)",
"def delete_course(self, course_key, user_id): # lint-amnesty, pylint: disable=arguments-differ\n # this is the only real delete in the system. should it do something else?\n log.info(\"deleting course from split-mongo: %s\", course_key)\n self.delete_course_index(course_key)\n\n # We do NOT call the super class here since we need to keep the assets\n # in case the course is later restored.\n # super(SplitMongoModuleStore, self).delete_course(course_key, user_id)\n\n self._emit_course_deleted_signal(course_key)",
"def delete_course_index(self, course_index):\r\n return self.course_index.remove(son.SON([('org', course_index['org']), ('offering', course_index['offering'])]))",
"def create_course_for_deletion(self):\r\n course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')\r\n root = course.location.version_agnostic().for_branch('draft')\r\n for _ in range(4):\r\n self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])\r\n return modulestore().get_item(root)",
"def delete_course(self, course_key, user_id=None):\r\n course_query = self._course_key_to_son(course_key)\r\n self.collection.remove(course_query, multi=True)",
"def test_mongo_course_add_delete(self):\r\n\r\n self._setstaff_login()\r\n self._mkdir(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n def_ms = modulestore()\r\n self.assertFalse(isinstance(def_ms, XMLModuleStore))\r\n\r\n self._add_edx4edx()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n self.assertIsNotNone(course)\r\n\r\n self._rm_edx4edx()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n self.assertIsNone(course)",
"def test_course_delete(self, app, auth):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n app.course.create_course(course_data)\n app.admin.manage_courses()\n app.course.delete_course()\n assert (course_data.short_course_name in app.course.sure_delete()), \\\n \"The course was not deleted!\"",
"def delete_course(course_id: int, db: Session = Depends(get_db)):\n\n try:\n crud.course.delete(db, obj_id=course_id)\n except Exception as error:\n logger.error(f'{error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')\n\n return Response(status_code=status.HTTP_204_NO_CONTENT)",
"def remove_course_topics(self, course):\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()"
]
| [
"0.82798076",
"0.74495906",
"0.7176098",
"0.7132954",
"0.69807893",
"0.69125354",
"0.67496634",
"0.6682265",
"0.6664997",
"0.6659029",
"0.65438765",
"0.65389067",
"0.6500221",
"0.6454423",
"0.64459604",
"0.6284018",
"0.6217394",
"0.61738217",
"0.6137372",
"0.6113713",
"0.61076456",
"0.6052512",
"0.59801346",
"0.5934987",
"0.59242827",
"0.5905106",
"0.5897063",
"0.5888862",
"0.58782816",
"0.5822075"
]
| 0.8008807 | 1 |
Function to remove all sections of a course in the db | def remove_course_in_section(self, course):
DELETE_COURSE_SECTIONS = """DELETE FROM Section WHERE course_name = %s"""
self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_in_section_grades(self, course):\n DELETE_COURSE_SECTION_GRADES = \"\"\"DELETE FROM SectionGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_empty_courses(self):\n pass",
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def filter_sections(courses, selected_sections):\n for c in courses:\n c_key = f\"{c.name} {c.num}\"\n\n lab_section = selected_sections[c_key][\"lab\"]\n lecture_section = selected_sections[c_key][\"lecture\"]\n tutorial_section = selected_sections[c_key][\"tutorial\"]\n\n c.labs = [s for s in c.labs if s.section == lab_section]\n c.lectures = [s for s in c.lectures if s.section == lecture_section]\n c.tutorials = [s for s in c.tutorials if s.section == tutorial_section]",
"def delete(self, *args, **kwargs):\n print(\"form delete\")\n self.is_deleted = True\n current_section_sequence = self.section_sequence\n\n #This can be modified if we have to hard delete the sections\n\n # for sec_id in current_section_sequence:\n # current_section = Sections.objects.get(id = sec_id )\n # current_section.delete()\n\n self.save()",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def remove_section(self, section):\r\n for key in self.fields.keys():\r\n if key.startswith(section):\r\n del self.fields[key]",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def test_delete_course(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n draft_store = modulestore('draft')\r\n\r\n _, course_items = import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store)\r\n\r\n course_id = course_items[0].id\r\n\r\n # get a vertical (and components in it) to put into 'draft'\r\n vertical = module_store.get_item(course_id.make_usage_key('vertical', 'vertical_test'), depth=1)\r\n\r\n draft_store.convert_to_draft(vertical.location)\r\n for child in vertical.get_children():\r\n draft_store.convert_to_draft(child.location)\r\n\r\n # delete the course\r\n delete_course(module_store, content_store, course_id, commit=True)\r\n\r\n # assert that there's absolutely no non-draft modules in the course\r\n # this should also include all draft items\r\n items = module_store.get_items(course_id)\r\n self.assertEqual(len(items), 0)\r\n\r\n # assert that all content in the asset library is also deleted\r\n assets, count = content_store.get_all_content_for_course(course_id)\r\n self.assertEqual(len(assets), 0)\r\n self.assertEqual(count, 0)",
"def deleteSection():\n data = request.json\n if \"agenda_id\" in data and \"section_position\" in data:\n if connectMongo.getAgendaById(data.get(\"agenda_id\")).found:\n responseWrapper: ResponseWrapper = connectMongo.deleteSection(data.get(\"agenda_id\"),\n data.get(\"section_position\"))\n if responseWrapper.operationDone:\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n return jsonify(response=501, msg=\"Delete Failed\")\n else:\n return jsonify(response=404, msg=\"Agenda not found\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")",
"def delete_sections(ids):\n Sections.delete_multiple_by_id(ids)\n return Sections.get_names()",
"def select_all_sections(self, course_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT * FROM course_sections \n WHERE course_id = ?\"\"\",\n (course_id,),\n )\n return cursor.fetchall()",
"def _rm_edx4edx(self):\r\n def_ms = modulestore()\r\n course_path = '{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR))\r\n try:\r\n # using XML store\r\n course = def_ms.courses.get(course_path, None)\r\n except AttributeError:\r\n # Using mongo store\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n\r\n # Delete git loaded course\r\n response = self.client.post(\r\n reverse('sysadmin_courses'),\r\n {\r\n 'course_id': course.id.to_deprecated_string(),\r\n 'action': 'del_course',\r\n }\r\n )\r\n self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))\r\n\r\n return response",
"def remove_courses(self, *course_keys):\r\n entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)\r\n entries.delete()\r\n if hasattr(self.user, '_roles'):\r\n del self.user._roles",
"def delete_course(self, course_key, user_id=None):\r\n index = self.db_connection.get_course_index(course_key)\r\n if index is None:\r\n raise ItemNotFoundError(course_key)\r\n # this is the only real delete in the system. should it do something else?\r\n log.info(u\"deleting course from split-mongo: %s\", course_key)\r\n self.db_connection.delete_course_index(index)",
"def test_delete_entry_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_courses(id, topic_id, course_id)",
"def delete_course_index(self, course_index):\r\n return self.course_index.remove(son.SON([('org', course_index['org']), ('offering', course_index['offering'])]))",
"def create_course_for_deletion(self):\r\n course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')\r\n root = course.location.version_agnostic().for_branch('draft')\r\n for _ in range(4):\r\n self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])\r\n return modulestore().get_item(root)",
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def delete_course(self, course_key, user_id): # lint-amnesty, pylint: disable=arguments-differ\n # this is the only real delete in the system. should it do something else?\n log.info(\"deleting course from split-mongo: %s\", course_key)\n self.delete_course_index(course_key)\n\n # We do NOT call the super class here since we need to keep the assets\n # in case the course is later restored.\n # super(SplitMongoModuleStore, self).delete_course(course_key, user_id)\n\n self._emit_course_deleted_signal(course_key)",
"def delete_course_index(self, course_key):\n if self._is_in_bulk_operation(course_key, False):\n self._clear_bulk_ops_record(course_key)\n\n self.db_connection.delete_course_index(course_key)",
"def remove_course(self, key: str, index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].remove_course(key)",
"def clean_exam():\n data = Exam.objects.all()\n data.delete()",
"def test_delete_course(mocker):\n mock_del_document = mocker.patch(\"search.search_index_helpers.deindex_document\")\n mock_bulk_del = mocker.patch(\n \"search.search_index_helpers.deindex_run_content_files\"\n )\n course = CourseFactory.create()\n course_es_id = gen_course_id(course.platform, course.course_id)\n\n deindex_course(course)\n mock_del_document.assert_called_once_with(course_es_id, COURSE_TYPE)\n for run in course.runs.iterator():\n mock_bulk_del.assert_any_call(run.id)",
"def delete_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n DELETE FROM course_enrollments\n WHERE student_id = ?, course_id = ?, course_section_id = ?, term = ?\n (?,?,?,?)\"\"\",\n (student_id, course_id, course_section_id, term),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1",
"def remove_course_topics(self, course):\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()",
"def test_delete_grading_period_courses(self):\r\n course_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_courses(id, course_id)"
]
| [
"0.7411444",
"0.678805",
"0.6720206",
"0.6509268",
"0.64289224",
"0.63652366",
"0.6362051",
"0.63241756",
"0.6254571",
"0.62334865",
"0.62213874",
"0.62109655",
"0.6164397",
"0.60369295",
"0.6029878",
"0.59812313",
"0.5970608",
"0.5964486",
"0.5936933",
"0.5905548",
"0.58899164",
"0.5853228",
"0.58163255",
"0.58107424",
"0.575903",
"0.5736842",
"0.57206744",
"0.5699643",
"0.56962216",
"0.5684401"
]
| 0.79037875 | 0 |
Function to remove occurrences of a course in section grades in the db | def remove_course_in_section_grades(self, course):
DELETE_COURSE_SECTION_GRADES = """DELETE FROM SectionGrades WHERE course = %s"""
self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def remove_empty_courses(self):\n pass",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def education_clean_row(row_of_data):\n education = row_of_data.get('education')\n z = list(set(remove_filler_words(education)))\n return z",
"def remove_course(self, key: str):\n if key in self._courses:\n self._total_load -= self._courses[key].credit_load\n self._total_diff -= self._courses[key].difficulty\n del self._courses[key]\n if len(self._courses) > 0:\n self._diff_rating = self._total_diff / len(self._courses)\n else:\n self._diff_rating = 0.0\n return True\n return False",
"def filter_sections(courses, selected_sections):\n for c in courses:\n c_key = f\"{c.name} {c.num}\"\n\n lab_section = selected_sections[c_key][\"lab\"]\n lecture_section = selected_sections[c_key][\"lecture\"]\n tutorial_section = selected_sections[c_key][\"tutorial\"]\n\n c.labs = [s for s in c.labs if s.section == lab_section]\n c.lectures = [s for s in c.lectures if s.section == lecture_section]\n c.tutorials = [s for s in c.tutorials if s.section == tutorial_section]",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def erase_scores(self):\n self.database.erase_scores(self.difficulty)",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_course_goals(self, course):\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()",
"def remove_section(self, section):\r\n for key in self.fields.keys():\r\n if key.startswith(section):\r\n del self.fields[key]",
"def clean_exam():\n data = Exam.objects.all()\n data.delete()",
"def remove_courses(self, *course_keys):\r\n entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)\r\n entries.delete()\r\n if hasattr(self.user, '_roles'):\r\n del self.user._roles",
"def test_delete_grading_period_courses(self):\r\n course_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_courses(id, course_id)",
"def delete_course_index(self, course_index):\r\n return self.course_index.remove(son.SON([('org', course_index['org']), ('offering', course_index['offering'])]))",
"def delete_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n DELETE FROM course_enrollments\n WHERE student_id = ?, course_id = ?, course_section_id = ?, term = ?\n (?,?,?,?)\"\"\",\n (student_id, course_id, course_section_id, term),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1",
"def remove_course(self, key: str, index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].remove_course(key)",
"def remove_from_drawn(section: str, index: int):\r\n del drawn[section][index]",
"def experience_clean_row(row_of_data):\n experience = row_of_data.get('experience')\n z = list(set(remove_filler_words(experience)))\n return z",
"def remove_student(self, student: 'Student') -> None:\n # Subtract HOUSEHOLD attributes to the schools' composition\n self.total -= 1\n self.composition -= student.household.attributes # TODO: zero self.composition?\n self.students.pop(student.idx)\n # after removing a Student, there will always be space\n self.has_space = True",
"def remove_exercises_done_by_only_one_student(df, exercises):\n nb_student_by_exo = (\n df[[\"student_id\", \"exercise_code\"]]\n .drop_duplicates()\n .groupby(\"exercise_code\", as_index=False)\n .count()\n )\n exercises_to_keep = exercises\n exercises_to_remove = nb_student_by_exo[nb_student_by_exo[\"student_id\"] == 1][\n \"exercise_code\"\n ].values\n for exercise in exercises_to_remove:\n exercises_to_keep.remove(exercise)\n return exercises_to_keep",
"def filter_courses(original_courses_list, year, upper_bound, lower_bound, semester):\n filtered_courses_list = []\n\n for course in original_courses_list:\n if year is not None and course.year != year:\n continue\n if upper_bound is not None and course.grade > upper_bound:\n continue\n if lower_bound is not None and course.grade < lower_bound:\n continue\n if semester is not None and course.semester != semester:\n continue\n filtered_courses_list.append(course)\n\n return filtered_courses_list",
"def remove_enrollment(\n subject_code: str, student_ra: int, year: int, semester: int\n) -> dict:\n enrollment = (\n session.query(Enrollment)\n .filter_by(\n subject_code=subject_code,\n student_ra=student_ra,\n year=year,\n semester=semester,\n )\n .first()\n )\n\n if enrollment:\n session.delete(enrollment)\n session.commit()\n\n return {\"status\": 200, \"message\": \"Deleted successfully!\"}",
"def clear_subjects(db):\n\t\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tp.subject = None",
"def unenroll_student(self, student_email):\n # check if course exists\n if not self.is_course_exists():\n print(\"The given course not found\")\n return\n\n if self.is_student_enrolled(student_email):\n db = self._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n if student_email in db[\"courses\"][crs_i][\"students\"]:\n db[\"courses\"][crs_i][\"students\"].remove(student_email)\n break\n self._file.write_db(db)\n print(\"The student with email : {} is unenrolled from {} course\".format(student_email, self._course_name))\n else:\n print(\"No matching student found by email : {}\".format(student_email))",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def test_student_id_exclude(self, db, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"bar\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"baz\", \"--db\", db])\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"source\", \"ps1\", \"p1.ipynb\"))\n run_nbgrader([\"assign\", \"ps1\", \"--db\", db])\n\n for student in [\"foo\", \"bar\", \"baz\"]:\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"submitted\", student, \"ps1\", \"p1.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\", \"--db\", db])\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"--CourseDirectory.student_id_exclude=bar,baz\"])\n\n for student in [\"foo\", \"bar\", \"baz\"]:\n assert exists(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"p1.ipynb\"))\n\n assert exists(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert not exists(join(course_dir, \"feedback\", \"bar\", \"ps1\", \"p1.html\"))\n assert not exists(join(course_dir, \"feedback\", \"baz\", \"ps1\", \"p1.html\"))"
]
| [
"0.73677933",
"0.6780969",
"0.65231955",
"0.6249915",
"0.6122453",
"0.60941887",
"0.60487217",
"0.6046306",
"0.58382064",
"0.5802625",
"0.5710555",
"0.5664911",
"0.5640699",
"0.56190586",
"0.5608399",
"0.5597606",
"0.55732626",
"0.5532856",
"0.5515773",
"0.54833287",
"0.53617924",
"0.53431356",
"0.52803713",
"0.5278429",
"0.52784055",
"0.52657115",
"0.52437633",
"0.52348036",
"0.5210447",
"0.5208604"
]
| 0.776278 | 0 |
Function to remove occurrences of course in section grades in the db | def remove_course_in_section_goal_grades(self, course):
DELETE_COURSE_SECTION_GOAL_GRADES = """DELETE FROM SectionGoalGrades WHERE course = %s"""
self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_course_in_section_grades(self, course):\n DELETE_COURSE_SECTION_GRADES = \"\"\"DELETE FROM SectionGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))\n self.db_connection.commit()",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def remove_empty_courses(self):\n pass",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def education_clean_row(row_of_data):\n education = row_of_data.get('education')\n z = list(set(remove_filler_words(education)))\n return z",
"def remove_course(self, key: str):\n if key in self._courses:\n self._total_load -= self._courses[key].credit_load\n self._total_diff -= self._courses[key].difficulty\n del self._courses[key]\n if len(self._courses) > 0:\n self._diff_rating = self._total_diff / len(self._courses)\n else:\n self._diff_rating = 0.0\n return True\n return False",
"def filter_sections(courses, selected_sections):\n for c in courses:\n c_key = f\"{c.name} {c.num}\"\n\n lab_section = selected_sections[c_key][\"lab\"]\n lecture_section = selected_sections[c_key][\"lecture\"]\n tutorial_section = selected_sections[c_key][\"tutorial\"]\n\n c.labs = [s for s in c.labs if s.section == lab_section]\n c.lectures = [s for s in c.lectures if s.section == lecture_section]\n c.tutorials = [s for s in c.tutorials if s.section == tutorial_section]",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove_course(self, course):\n DELETE_COURSE = \"\"\"DELETE FROM Course WHERE name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE, (course.name,))\n self.db_connection.commit()",
"def erase_scores(self):\n self.database.erase_scores(self.difficulty)",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_course_goals(self, course):\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()",
"def remove_section(self, section):\r\n for key in self.fields.keys():\r\n if key.startswith(section):\r\n del self.fields[key]",
"def clean_exam():\n data = Exam.objects.all()\n data.delete()",
"def remove_courses(self, *course_keys):\r\n entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)\r\n entries.delete()\r\n if hasattr(self.user, '_roles'):\r\n del self.user._roles",
"def test_delete_grading_period_courses(self):\r\n course_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_courses(id, course_id)",
"def delete_course_index(self, course_index):\r\n return self.course_index.remove(son.SON([('org', course_index['org']), ('offering', course_index['offering'])]))",
"def delete_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n DELETE FROM course_enrollments\n WHERE student_id = ?, course_id = ?, course_section_id = ?, term = ?\n (?,?,?,?)\"\"\",\n (student_id, course_id, course_section_id, term),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1",
"def remove_course(self, key: str, index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].remove_course(key)",
"def remove_from_drawn(section: str, index: int):\r\n del drawn[section][index]",
"def experience_clean_row(row_of_data):\n experience = row_of_data.get('experience')\n z = list(set(remove_filler_words(experience)))\n return z",
"def remove_student(self, student: 'Student') -> None:\n # Subtract HOUSEHOLD attributes to the schools' composition\n self.total -= 1\n self.composition -= student.household.attributes # TODO: zero self.composition?\n self.students.pop(student.idx)\n # after removing a Student, there will always be space\n self.has_space = True",
"def remove_exercises_done_by_only_one_student(df, exercises):\n nb_student_by_exo = (\n df[[\"student_id\", \"exercise_code\"]]\n .drop_duplicates()\n .groupby(\"exercise_code\", as_index=False)\n .count()\n )\n exercises_to_keep = exercises\n exercises_to_remove = nb_student_by_exo[nb_student_by_exo[\"student_id\"] == 1][\n \"exercise_code\"\n ].values\n for exercise in exercises_to_remove:\n exercises_to_keep.remove(exercise)\n return exercises_to_keep",
"def filter_courses(original_courses_list, year, upper_bound, lower_bound, semester):\n filtered_courses_list = []\n\n for course in original_courses_list:\n if year is not None and course.year != year:\n continue\n if upper_bound is not None and course.grade > upper_bound:\n continue\n if lower_bound is not None and course.grade < lower_bound:\n continue\n if semester is not None and course.semester != semester:\n continue\n filtered_courses_list.append(course)\n\n return filtered_courses_list",
"def remove_enrollment(\n subject_code: str, student_ra: int, year: int, semester: int\n) -> dict:\n enrollment = (\n session.query(Enrollment)\n .filter_by(\n subject_code=subject_code,\n student_ra=student_ra,\n year=year,\n semester=semester,\n )\n .first()\n )\n\n if enrollment:\n session.delete(enrollment)\n session.commit()\n\n return {\"status\": 200, \"message\": \"Deleted successfully!\"}",
"def clear_subjects(db):\n\t\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tp.subject = None",
"def unenroll_student(self, student_email):\n # check if course exists\n if not self.is_course_exists():\n print(\"The given course not found\")\n return\n\n if self.is_student_enrolled(student_email):\n db = self._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n if student_email in db[\"courses\"][crs_i][\"students\"]:\n db[\"courses\"][crs_i][\"students\"].remove(student_email)\n break\n self._file.write_db(db)\n print(\"The student with email : {} is unenrolled from {} course\".format(student_email, self._course_name))\n else:\n print(\"No matching student found by email : {}\".format(student_email))",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def test_student_id_exclude(self, db, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"bar\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"baz\", \"--db\", db])\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"source\", \"ps1\", \"p1.ipynb\"))\n run_nbgrader([\"assign\", \"ps1\", \"--db\", db])\n\n for student in [\"foo\", \"bar\", \"baz\"]:\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"submitted\", student, \"ps1\", \"p1.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\", \"--db\", db])\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"--CourseDirectory.student_id_exclude=bar,baz\"])\n\n for student in [\"foo\", \"bar\", \"baz\"]:\n assert exists(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"p1.ipynb\"))\n\n assert exists(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert not exists(join(course_dir, \"feedback\", \"bar\", \"ps1\", \"p1.html\"))\n assert not exists(join(course_dir, \"feedback\", \"baz\", \"ps1\", \"p1.html\"))"
]
| [
"0.776278",
"0.6780969",
"0.65231955",
"0.6249915",
"0.6122453",
"0.60941887",
"0.60487217",
"0.6046306",
"0.58382064",
"0.5802625",
"0.5710555",
"0.5664911",
"0.5640699",
"0.56190586",
"0.5608399",
"0.5597606",
"0.55732626",
"0.5532856",
"0.5515773",
"0.54833287",
"0.53617924",
"0.53431356",
"0.52803713",
"0.5278429",
"0.52784055",
"0.52657115",
"0.52437633",
"0.52348036",
"0.5210447",
"0.5208604"
]
| 0.73677933 | 1 |
Function to remove course from the db | def remove_course(self, course):
DELETE_COURSE = """DELETE FROM Course WHERE name = %s"""
self.db_cursor.execute(DELETE_COURSE, (course.name,))
self.db_connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def remove_course_in_section(self, course):\n DELETE_COURSE_SECTIONS = \"\"\"DELETE FROM Section WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTIONS, (course.name,))\n self.db_connection.commit()",
"def remove_course_in_curriculum_listings(self, course):\n DELETE_CURRICULUM_COURSES = \"\"\"DELETE FROM CurriculumListings WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_CURRICULUM_COURSES, (course.name,))\n self.db_connection.commit()",
"def remove_course(roster, student, course):\r\n roster[student].remove(course)",
"def delete_course(self, course_key, user_id=None):\r\n index = self.db_connection.get_course_index(course_key)\r\n if index is None:\r\n raise ItemNotFoundError(course_key)\r\n # this is the only real delete in the system. should it do something else?\r\n log.info(u\"deleting course from split-mongo: %s\", course_key)\r\n self.db_connection.delete_course_index(index)",
"def delete_course(course_id: int, db: Session = Depends(get_db)):\n\n try:\n crud.course.delete(db, obj_id=course_id)\n except Exception as error:\n logger.error(f'{error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')\n\n return Response(status_code=status.HTTP_204_NO_CONTENT)",
"def remove():\n\n db_remove()",
"def remove_course_in_section_grades(self, course):\n DELETE_COURSE_SECTION_GRADES = \"\"\"DELETE FROM SectionGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GRADES, (course.name,))\n self.db_connection.commit()",
"def test_removing_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n user.remove_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.removing_course: Removing courses failed.\")",
"def delete_course(self, course_key, user_id): # lint-amnesty, pylint: disable=arguments-differ\n # this is the only real delete in the system. should it do something else?\n log.info(\"deleting course from split-mongo: %s\", course_key)\n self.delete_course_index(course_key)\n\n # We do NOT call the super class here since we need to keep the assets\n # in case the course is later restored.\n # super(SplitMongoModuleStore, self).delete_course(course_key, user_id)\n\n self._emit_course_deleted_signal(course_key)",
"def remove_curriculum_courses(self, curriculum):\n DELETE_FROM_CURRICULUM_LISTINGS = \"\"\"DELETE FROM CurriculumListings WHERE curriculum_name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_FROM_CURRICULUM_LISTINGS, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum courses.\")",
"def remove_course_goals(self, course):\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()",
"def delete_course(self, course_key, user_id=None):\r\n course_query = self._course_key_to_son(course_key)\r\n self.collection.remove(course_query, multi=True)",
"def remove_course(self, key: str):\n if key in self._courses:\n self._total_load -= self._courses[key].credit_load\n self._total_diff -= self._courses[key].difficulty\n del self._courses[key]\n if len(self._courses) > 0:\n self._diff_rating = self._total_diff / len(self._courses)\n else:\n self._diff_rating = 0.0\n return True\n return False",
"def remove_course(self, course):\n if course == isinstance(course, list):\n for c in course:\n if c in self.courses:\n self.courses.remove(c)\n else:\n self.courses.remove(course)",
"def create_course_for_deletion(self):\r\n course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')\r\n root = course.location.version_agnostic().for_branch('draft')\r\n for _ in range(4):\r\n self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])\r\n return modulestore().get_item(root)",
"def test_course_delete(self, app, auth):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n app.course.create_course(course_data)\n app.admin.manage_courses()\n app.course.delete_course()\n assert (course_data.short_course_name in app.course.sure_delete()), \\\n \"The course was not deleted!\"",
"def remove_course_in_section_goal_grades(self, course):\n DELETE_COURSE_SECTION_GOAL_GRADES = \"\"\"DELETE FROM SectionGoalGrades WHERE course = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_SECTION_GOAL_GRADES, (course.name,))\n self.db_connection.commit()",
"def _rm_edx4edx(self):\r\n def_ms = modulestore()\r\n course_path = '{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR))\r\n try:\r\n # using XML store\r\n course = def_ms.courses.get(course_path, None)\r\n except AttributeError:\r\n # Using mongo store\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n\r\n # Delete git loaded course\r\n response = self.client.post(\r\n reverse('sysadmin_courses'),\r\n {\r\n 'course_id': course.id.to_deprecated_string(),\r\n 'action': 'del_course',\r\n }\r\n )\r\n self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))\r\n\r\n return response",
"def test_mongo_course_add_delete(self):\r\n\r\n self._setstaff_login()\r\n self._mkdir(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n def_ms = modulestore()\r\n self.assertFalse(isinstance(def_ms, XMLModuleStore))\r\n\r\n self._add_edx4edx()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n self.assertIsNotNone(course)\r\n\r\n self._rm_edx4edx()\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n self.assertIsNone(course)",
"def remove_course(self, term, schedule, crn):\n query = {'Term': term.code,\n 'Schedule': schedule,\n 'CourseID': crn,\n 'ShowDebug': 0,\n '_': int(float(time.time()) * 10**3)}\n\n self.get(self.REMOVE_COURSE_ENDPOINT, params=query)",
"def delete_course_index(self, course_index):\r\n return self.course_index.remove(son.SON([('org', course_index['org']), ('offering', course_index['offering'])]))",
"def test_delete_lecture(lecture_class, course, valid_datetime):\n id = lecture_class.create_lecture(course, valid_datetime)\n assert id != None\n assert lecture_class.delete_lecture()",
"def remove_course(self, key: str, index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].remove_course(key)",
"def delete_course(modulestore, contentstore, course_key, commit=False):\r\n\r\n # check to see if the source course is actually there\r\n if not modulestore.has_course(course_key):\r\n raise Exception(\"Cannot find a course at {0}. Aborting\".format(course_key))\r\n\r\n if commit:\r\n print \"Deleting assets and thumbnails {}\".format(course_key)\r\n contentstore.delete_all_course_assets(course_key)\r\n\r\n # finally delete the course\r\n print \"Deleting {0}...\".format(course_key)\r\n if commit:\r\n modulestore.delete_course(course_key, '**replace-user**')\r\n\r\n return True",
"def test_delete_course(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n draft_store = modulestore('draft')\r\n\r\n _, course_items = import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store)\r\n\r\n course_id = course_items[0].id\r\n\r\n # get a vertical (and components in it) to put into 'draft'\r\n vertical = module_store.get_item(course_id.make_usage_key('vertical', 'vertical_test'), depth=1)\r\n\r\n draft_store.convert_to_draft(vertical.location)\r\n for child in vertical.get_children():\r\n draft_store.convert_to_draft(child.location)\r\n\r\n # delete the course\r\n delete_course(module_store, content_store, course_id, commit=True)\r\n\r\n # assert that there's absolutely no non-draft modules in the course\r\n # this should also include all draft items\r\n items = module_store.get_items(course_id)\r\n self.assertEqual(len(items), 0)\r\n\r\n # assert that all content in the asset library is also deleted\r\n assets, count = content_store.get_all_content_for_course(course_id)\r\n self.assertEqual(len(assets), 0)\r\n self.assertEqual(count, 0)",
"def test_delete_entry_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_courses(id, topic_id, course_id)",
"def remove_course_topics(self, course):\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()",
"def remove_curriculum(self, curriculum):\n DELETE_CURRICULUM = \"\"\"DELETE FROM Curriculum WHERE name = %s\"\"\"\n\n try:\n self.db_cursor.execute(DELETE_CURRICULUM, (curriculum.name,))\n self.db_connection.commit()\n except:\n logging.warning(\"DBAdapter: Error- Could not delete curriculum.\")",
"def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()"
]
| [
"0.79385114",
"0.7593171",
"0.74404275",
"0.7331626",
"0.7117368",
"0.70794225",
"0.69154245",
"0.6879456",
"0.68416655",
"0.6762682",
"0.67380595",
"0.669831",
"0.6666423",
"0.6628351",
"0.66240865",
"0.65819556",
"0.6554197",
"0.652338",
"0.6522206",
"0.6491164",
"0.6485401",
"0.64511317",
"0.64380246",
"0.6437801",
"0.6394615",
"0.63845485",
"0.6345613",
"0.63323843",
"0.63129836",
"0.62385523"
]
| 0.8314269 | 0 |
Stores the old gradient table for recalibration purposes. | def store_old_table(self):
for group in self.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
gktbl = param_state['gktbl']
gavg = param_state['gavg']
param_state['gktbl_old'] = gktbl.clone()
param_state['gavg_old'] = gavg.clone() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _UpdateGradient(self):\n self.mol.GetGradient('analytic')",
"def gradients(self):\n return {}",
"def post_gradient_application(self, sess: tf.Session) -> None:\n pass",
"def save_and_step(self):\n self.last_grads = [param.grad for param in self.agent.model.parameters()]\n self.optimizer.pytorch_step()",
"def _save_grad_output(self, mod, grad_input, grad_output):\n if mod.training:\n self.state[mod][\"gy\"] = grad_output[0] * grad_output[0].size(0)",
"def handle_gradient(self):\n self._optimizer.sync_grad()",
"def store_grad(pp, grads, grad_dims, tid):\n # store the gradients\n grads[:, tid].fill_(0.0)\n cnt = 0\n for param in pp():\n if param.grad is not None:\n beg = 0 if cnt == 0 else sum(grad_dims[:cnt])\n en = sum(grad_dims[:cnt + 1])\n grads[beg: en, tid].copy_(param.grad.data.view(-1))\n cnt += 1",
"def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor",
"def save_state(self):\n\t\tself._history['time'].append(self.t)\n\t\tstate = np.array(self.x[np.newaxis,:,:])\n\t\tself._history['state'] = np.vstack([self._history['state'],state])",
"def pass_gradients(self):\n return self.last_grads",
"def update_gradients(self, grads):\n self.variance.gradient = grads[0]",
"def _save_state(self):\n with open(self.histFile,'wb') as hf:\n hf.write(self.dbFile.Value)",
"def _reset_histories(self):\n self.train_loss_history = []\n self.train_pos_dist_history = []\n self.train_neg_dist_history = []\n self.val_loss_history = []\n self.val_pos_dist_history = []\n self.val_neg_dist_history = []",
"def save_feedback_gradients(self, reconstruction_loss):\n self.reconstruction_loss = reconstruction_loss.item()\n if self.feedbackbias is not None:\n grads = torch.autograd.grad(reconstruction_loss, [\n self.feedbackweights, self.feedbackbias], retain_graph=False)\n self._feedbackbias.grad = grads[1].detach()\n else:\n grads = torch.autograd.grad(reconstruction_loss,\n self.feedbackweights,\n retain_graph=False\n )\n self._feedbackweights.grad = grads[0].detach()",
"def _reset_gradients(self):\n self.grad = None # gradient itself\n self.grad_fn = None # functions to call for gradient\n self.grad_expected = 0 # number of gradients expected from parents\n self.grad_received = 0 # number of gradients received from parents\n self.children = [] # children of node in graph\n self.ctx = AutogradContext() # contexts for AutogradFunctions",
"def _reset_stored(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]",
"def backward(self, gradient):\n #TODO\n pass",
"def backward(self, gradient):\n #TODO\n pass",
"def perform_update(self, gradient):\n w = sys.modules[self.shared_mem_name].__dict__[\"w\"]\n w -= self.learning_rate * gradient",
"def _reset_histories(self):\n\t\tself.train_loss_history = []\n\t\tself.train_acc_history = []\n\t\tself.val_acc_history = []\n\t\tself.val_loss_history = []",
"def _store(self):\n if self._data is not None:\n store_dict = {\"data\": ObjectTable(data={\"data\": [self._data]})}\n\n if self.f_has_range():\n store_dict[\"explored_data\"] = ObjectTable(\n data={\"data\": self._explored_range}\n )\n\n self._locked = True\n\n return store_dict",
"def reset(self):\r\n self.look_up_table = list(map(convert_to_list, self.const_look_up_table))",
"def update_gradients(self, dw, db):\n self.w = self.w - self.lr * dw\n self.b = self.b - (self.lr * db)",
"def set_gradient(self, gradient, relink=None):\n # Fast path 'set(get())'-like\n if gradient is self._gradient:\n return\n # Assignment\n if (self._config.relink if relink is None else relink):\n tools.relink(tools.grads_of(self._model.parameters()), gradient)\n self._gradient = gradient\n else:\n self.get_gradient().copy_(gradient, non_blocking=self._config[\"non_blocking\"])",
"def save_board_state(self):\n self.board_states.append([copy.deepcopy(self.stock), copy.deepcopy(self.wp), \n copy.deepcopy(self.foundations), copy.deepcopy(self.tableaus)])",
"def push_gradients(self, grad):\n\n self._gradients.append(grad)",
"def reinit(self):\n self.data_updating = {}\n self.reinitialization = True\n # force the bounds to be defined again\n self.bounds = None",
"def _save_rep_prof_index_internally(self):\n\n self.data.solar_meta[self.__solar_rpi_n] = self.data.solar_meta.index\n self.data.wind_meta[self.__wind_rpi_n] = self.data.wind_meta.index",
"def update_grad_data():\n t_file = 'hcapgrd1_full_data_*.fits*'\n out_dir = deposit_dir + '/Grad_save/'\n tdir = out_dir + 'Gradcap/'\n#\n#--- read grad group name\n#\n gfile = house_keeping + 'grad_list'\n grad_list = mcf.read_data_file(gfile)\n\n [tstart, tstop, year] = ecf.find_data_collecting_period(tdir, t_file)\n\n get_data(tstart, tstop, year, grad_list, out_dir)",
"def GradientAdjuster(self):\n pass"
]
| [
"0.5903432",
"0.57772994",
"0.574436",
"0.5678036",
"0.55876976",
"0.55809575",
"0.55665493",
"0.55653405",
"0.55429",
"0.552062",
"0.55082816",
"0.5454636",
"0.54110724",
"0.5403632",
"0.5389521",
"0.53057986",
"0.526381",
"0.526381",
"0.5259402",
"0.52561975",
"0.52488196",
"0.5236595",
"0.52230304",
"0.5218837",
"0.5216144",
"0.5202015",
"0.5194137",
"0.51895726",
"0.51858735",
"0.5184036"
]
| 0.69276756 | 0 |
Returns a named plate type 96 or 384 according to num_wells | def create_plate(num_wells, name):
rows, cols = calc.rows_columns(int(num_wells))
new_plate = plate.Plate(rows, cols, name)
return new_plate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def return_agar_plates(wells=6):\n if wells == 6:\n plates = {\"lb_miller_50ug_ml_kan\": \"ki17rs7j799zc2\",\n \"lb_miller_100ug_ml_amp\": \"ki17sbb845ssx9\",\n \"lb_miller_100ug_ml_specto\": \"ki17sbb9r7jf98\",\n \"lb_miller_100ug_ml_cm\": \"ki17urn3gg8tmj\",\n \"lb_miller_noAB\": \"ki17reefwqq3sq\"}\n elif wells == 1:\n plates = {\"lb_miller_50ug_ml_kan\": \"ki17t8j7kkzc4g\",\n \"lb_miller_100ug_ml_amp\": \"ki17t8jcebshtr\",\n \"lb_miller_100ug_ml_specto\": \"ki17t8jaa96pw3\",\n \"lb_miller_100ug_ml_cm\": \"ki17urn592xejq\",\n \"lb_miller_noAB\": \"ki17t8jejbea4z\"}\n else:\n raise ValueError(\"Wells has to be an integer, either 1 or 6\")\n return (plates)",
"def add_sample_to_plate(sample_name, plate_barcode):\n plate_obj = Plate(plate_barcode)\n no_of_wells = plate_obj.get_well_count()\n well_position = well_helper.get_empty_well_position(plate_barcode, no_of_wells)\n well_obj = Well(plate_barcode, well_position)\n well_obj.add_sample_to_well(sample_name)",
"def get_well_name(self, idx):\n name = None\n if type(idx) is int:\n n = self.well_count()\n assert 0 <= idx <= n - 1, \"Bad well index\"\n name = self.wells[idx].drawdown.name\n return(name)",
"def get_slot_and_well_name(_in, pipette_type):\n _rows = 'ABCDEFGH'\n if isinstance(_in, tuple):\n _in = _in[0]\n _in_slot = _in.get_parent().get_parent().get_name()\n _in_name = _in.get_name()\n if pipette_type == 'multi' and _in_name.startswith('A'):\n # find column name. If regex fails, return the whole well name\n re_out = re.findall(r'\\d+$', _in_name)\n if len(re_out) == 0:\n raise ValueError('Regex failed: cannot find a number at the end of the well name??')\n _col = re_out[0]\n # expand column into 8 wells\n _in_names = []\n _in_slots = []\n for _row in _rows:\n _in_names.append(_row+_col)\n _in_slots.append(_in_slot)\n return _in_slots, _in_names\n else:\n return [_in_slot], [_in_name]",
"def get_type(cmb_type):\n\n terminology = ['Boulder','Trad','Sport','TR','Aid','Ice','Mixed','Alpine','Chipped']\n\n kind = {}\n kind_pitches_feet = str(cmb_type).split(', ')\n for morsel in kind_pitches_feet:\n if morsel in terminology:\n # columns end up either True or NaN\n kind[morsel.lower()] = True\n elif pitchRE.search(morsel):\n kind['pitches'] = morsel.split(' ')[0]\n elif feetRE.search(morsel):\n kind['feet'] = float(morsel[:-1])\n elif commitmentRE.search(morsel):\n kind['commitment'] = morsel.split(' ')[-1]\n return kind",
"def _check_well_type(well_type: str):\n group = None\n if well_type.lower() == 'inj':\n group = 'GCONI'\n elif well_type.lower() == 'prod':\n group = 'GCONP'\n else:\n raise ValueError('Well type inj or prod')\n return group",
"def getwellid(infile, wellinfo):\r\n m = re.search(\"\\d\", getfilename(infile))\r\n s = re.search(\"\\s\", getfilename(infile))\r\n if m.start() > 3:\r\n wellname = getfilename(infile)[0:m.start()].strip().lower()\r\n else:\r\n wellname = getfilename(infile)[0:s.start()].strip().lower()\r\n wellid = wellinfo[wellinfo['Well'] == wellname]['WellID'].values[0]\r\n return wellname, wellid",
"def license_plate(self) -> str:\n temp = re.sub(\n r\"\\?\",\n lambda x: self.random_element(self.ascii_uppercase_azerbaijan),\n self.random_element(self.license_formats),\n )\n temp = temp.replace(\"##\", self.random_element(self.license_plate_initial_numbers), 1)\n # temp = temp.format(self.random_element(range(1, 999)))\n return self.numerify(temp)",
"def license_plate(self) -> str:\n return self.numerify(self.generator.parse(self.random_element(self.license_formats)))",
"def _compress_plate(self, out_plate, in_plate, row_pad, col_pad, volume=1):\n with sql_connection.TRN:\n layout = in_plate.layout\n for row in layout:\n for well in row:\n # The row/col pair is stored in the DB starting at 1\n # subtract 1 to make it start at 0 so the math works\n # and re-add 1 at the end\n out_well_row = (((well.row - 1) * 2) + row_pad) + 1\n out_well_col = (((well.column - 1) * 2) + col_pad) + 1\n out_well = container_module.Well.create(\n out_plate, self, volume, out_well_row, out_well_col)\n composition_module.GDNAComposition.create(\n self, out_well, volume,\n well.composition.sample_composition)",
"def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"",
"def wt_strains(df):\n \n ts_plates = []\n dma_plates = []\n for plate in df.Plate.unique():\n if ('_26C_' in plate) or ('_37C_' in plate):\n ts_plates.append(plate)\n else:\n dma_plates.append(plate)\n\n wt_strain_ids_dma = df[(df['ORF'].isin(['YOR202W'])) &\n (df['Plate'].isin(dma_plates))]['Strain ID'].unique()\n wt_strain_ids_ts = df[(df['ORF'].isin(['YOR202W', 'YMR271C'])) &\n (df['Plate'].isin(ts_plates))]['Strain ID'].unique()\n wt_strain_ids = np.append(wt_strain_ids_ts, wt_strain_ids_dma)\n \n return wt_strain_ids",
"def save_plate(assumptions, plate):\n if assumptions['save_plate']:\n import dill as pickle\n with open(assumptions['output_dir'] + assumptions['exp_id'] + \".p\", \"wb\") as f:\n pickle.dump(plate, f)",
"def reorder_plates(self, plates):\n # Re-order the well-numbers of aliquots according to the \"well-number\"\n # This allows analyst to re-order wells if aliquots were transposed\n newplates = []\n for plate_nr, plate in enumerate(plates):\n newplate = plate.copy()\n # a list to check for used well numbers, to prevent user from\n # setting the same well-number twice on a plate when re-ordering\n _used_wells = []\n for w_nr in range(1, 9):\n w_nr = str(w_nr)\n nw = plate['well-number-%s' % w_nr]\n if nw in _used_wells:\n msg = \"Well number %s on plate %s\" % (nw, plate_nr + 1)\n raise DuplicateWellSelected(msg)\n _used_wells.append(nw)\n for c_nr in range(1, 5):\n key = 'chip-%s_well-%s'\n newplate[key % (c_nr, nw)] = plate[key % (c_nr, w_nr)]\n newplates.append(newplate)\n return newplates",
"def license_plate_mercosur(self) -> str:\n\n first_letter: str = self.random_element(self.license_plate_new_first_letter)\n second_letter: str = self.random_element(self.license_plate_new_second_letter)\n\n format = \"###??\"\n plate = first_letter + second_letter\n\n return self.bothify(plate + format).upper()",
"def license_plate(self) -> str:\n prefix: str = self.random_element(self.license_plate_prefix)\n suffix = self.bothify(\n self.random_element(self.license_plate_suffix),\n letters=string.ascii_uppercase,\n )\n return prefix + suffix",
"def get_specnos(self, spectra_file):\n\n self.plate_num = spectra_file[-23:-19]\n print(\"Fitting spectra in plate %s\" % self.plate_num)\n self.spectra = np.load(spectra_file) \n self.this_plate = self.MetaData[self.MetaData['PLATE'] == int(self.plate_num)]\n\n if self.ttype == 'test':\n max_num = 10 \n self.specnos = self.this_plate[0: max_num]['SPECNO']\n elif self.ttype == 'blue':\n max_num = len(self.spectra)-1\n self.specnos = self.this_plate[(self.this_plate['CAMERAS'] == b'b1') | (self.this_plate['CAMERAS'] == b'b2')]['SPECNO']\n elif self.ttype == 'red':\n max_num = len(self.spectra)-1\n self.specnos = self.this_plate[(self.this_plate['CAMERAS'] == b'r1') | (self.this_plate['CAMERAS'] == b'r2')]['SPECNO']\n elif self.ttype == 'full':\n max_num = len(self.spectra)\n self.specnos = self.this_plate['SPECNO']\n else: \n print(\"not a valid type. Going to test\")\n max_num = 10 #len(spectra) Number of spectra in a given plate that you want to run this for. Mostly for debugging\n self.specnos = np.random.choice(self.this_plate['SPECNO'], size=max_num)\n\n self.spectra_length = len(self.specnos)",
"def test_incorrect_data_type_plate():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='plate', size=384)",
"def plate_recognition(plate):\r\n cv2.destroyAllWindows()\r\n print(\"Without preprocessing: \")\r\n cv2.imshow('Plate', plate)\r\n print(\"Pytesseract: {}\".format(pytesseract.image_to_string(plate)))\r\n img = Image.fromarray(plate)\r\n print(\"OCR: {}\".format(tesserocr.image_to_text(img)))\r\n\r\n print(\"With preprocessing: \")\r\n image = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)\r\n image = cv2.bilateralFilter(image, 11, 17, 17)\r\n image = cv2.threshold(image, 177, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n cv2.imshow('Processed Plate', image)\r\n print(\"Pytesseract: {}\".format(pytesseract.image_to_string(image)))\r\n img = Image.fromarray(image)\r\n print(\"OCR: {}\".format(tesserocr.image_to_text(img)))\r\n cv2.waitKey(0)",
"def plot_plate(plate,labels=None,size=None,\n hue=None,hue_order=None,palette=None,\n text_hue=False,text_hue_order=None,text_palette=None,\n edgecolors=None,edgecolors_order=None,edgecolors_palette=None,\n default_color=[0,0,0],\n wells=96,\n ax=None,\n text_kwargs={},**kwargs):\n import numbers\n\n\n plate = fortify_plate(plate)\n if wells is None:\n wells = infer_plate_size(plate.index)\n\n shape = plates[wells]\n xs = np.arange(shape[1])\n ys = np.arange(shape[0])\n xx, yy = np.meshgrid(xs,ys)\n if ax is None: ax = plt.gca()\n\n row_labels = list(map(row2letters, ys))\n col_labels = list(map(lambda x: str(x+1), xs))\n ss = None\n cs = None\n ecs = None\n if size is not None:\n ss = np.zeros(shape[0]*shape[1])\n if isinstance(size,numbers.Number):\n ss += size\n size = None\n if hue is not None:\n cs = np.zeros((shape[0]*shape[1],3))\n hue_map = parse_hue(plate[hue], hue_order, palette)\n\n if text_hue is not None and text_hue != True and text_hue != False:\n text_cs = np.zeros((shape[0]*shape[1],3))\n text_hue_map = parse_hue(plate[text_hue], text_hue_order, text_palette)\n\n if edgecolors is not None:\n ecs = np.zeros((shape[0]*shape[1],3))\n edgecolors_map = parse_hue(plate[edgecolors], edgecolors_order, edgecolors_palette)\n\n\n # Iterate across each well of the plate\n for i,row in enumerate(ys):\n for j,col in enumerate(xs):\n well = row_labels[row]+col_labels[col]\n if well not in plate.index:\n continue\n if size is not None:\n ss[row*shape[1]+col] = plate.loc[well,size]\n if hue is not None:\n cs[row*shape[1]+col,:] = hue_map.get(plate.loc[well,hue],default_color)\n if edgecolors is not None:\n ecs[row*shape[1]+col,:] = edgecolors_map.get(plate.loc[well,edgecolors],default_color)\n if labels is not None:\n if labels == True:\n label = [well]\n elif isinstance(labels,str):\n label = [plate.loc[well,labels]]\n elif isinstance(labels,list):\n label = [plate.loc[well,l] for l in labels]\n\n if text_hue == True and hue is not None:\n if isinstance(hue, str):\n color = hue_map.get(plate.loc[well,hue],default_color)\n else:\n color = hue_map.get(tuple(plate.loc[well,hue]),default_color)\n elif text_hue != False:\n if isinstance(text_hue, str):\n color = text_hue_map.get(plate.loc[well,text_hue],default_color)\n else:\n color = text_hue_map.get(tuple(plate.loc[well,text_hue]),default_color)\n else:\n color = default_color\n\n for k,txt in enumerate(label):\n plt.annotate(\n txt,\n xy=(col, row), xytext=( col, row-0.2+(0.8*k*1/len(label)) ),\n textcoords=plt.gca().transData, #textcoords='offset points',\n ha='center', va='baseline',\n color=color, **text_kwargs)\n\n plt.scatter(xx,yy,c=cs,s=ss, edgecolors=ecs, **kwargs)\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n plt.yticks(ys, row_labels)\n plt.xticks(xs, col_labels)",
"def _get_name_constellation_specific(self) -> str:\n\n try:\n if self.is_archived:\n footprint_path = files.get_archived_path(self.path, r\".*\\.shp\")\n else:\n footprint_path = next(self.path.glob(\"*.shp\"))\n except (FileNotFoundError, StopIteration):\n raise InvalidProductError(\n \"Footprint shapefile cannot be found in the product!\"\n )\n\n # Open identifier\n name = files.get_filename(footprint_path)\n\n return name",
"def generate_license_plate(self, num):\n license_plate = []\n for _ in range(num):\n license_plate.append(self.fake.license_plate())\n return license_plate",
"def plate(self):\n with sql_connection.TRN as TRN:\n sql = \"\"\"SELECT DISTINCT plate_id\n FROM qiita.container\n LEFT JOIN qiita.well USING (container_id)\n LEFT JOIN qiita.plate USING (plate_id)\n WHERE latest_upstream_process_id = %s\"\"\"\n TRN.add(sql, [self.id])\n plate_id = TRN.execute_fetchlast()\n return plate_module.Plate(plate_id)",
"def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))",
"def _get_wells(self):\n wells = []\n for well in self.plate_meta['wells']:\n wells.append(well['path'])\n self.wells = wells",
"def __init__(self,\n samples,\n names = None,\n percentgel = Q_(1.0,'(g/(100 mL))*100'), # grams agarose/100 mL buffer * 100\n electrfield = Q_(5.0, 'V/cm'),\n temperature = Q_(295.15,'K'),\n gel_len = Q_(8,'cm'),\n wellx = Q_(7,'mm'),\n welly = Q_(2,'mm'),\n wellz = Q_(1,'mm'), # mm ######################### ??? ###\n wellsep = Q_(2,'mm') # mm\n ):\n \n self.samples = samples # assumes len(DNA) in bp #\n self.names = names if names else ['lane'+str(i) for i in #\n xrange(1, len(samples)+1)] #\n self.percent = to_units(percentgel, '(g/(100 mL))*100', 'percentgel') # agarose percentage\n self.field = to_units(electrfield, 'V/cm', 'electrfield') # electric field intensity\n self.temperature = to_units(temperature, 'K', 'temperature') # absolute temperature\n self.gel_len = to_units(gel_len, 'cm', 'gel_len') # lane length\n self.wellx = to_units(wellx, 'mm', 'wellx') # well width\n self.welly = to_units(welly, 'mm', 'welly') # well height\n self.wellz = (to_units(wellz, 'mm', 'wellz') if wellz is not None\n else wellz) # well depth\n self.wellsep = to_units(wellsep, 'mm', 'wellsep') # separation between wells\n # Volumes\n wellVol = self.wellx * self.welly * self.wellz\n wellVol.ito('ul')\n defaulVol = 0.85 * wellVol\n volumes = []\n for sample in self.samples:\n vol = sample.volume\n if not np.isnan(vol) and vol is not None:\n volumes.append(vol)\n else:\n volumes.append(defaulVol)\n self.volumes = to_units(volumes, 'uL', 'volumes')\n # Quantities\n defaulQty = Q_(150,'ng')\n self.quantities = assign_quantitiesB(self.samples, defaulQty)\n #self.quantities = assign_quantities(self.samples, quantities, defaulQty)\n self.runtime = np.nan ##########\n self.freesol_mob = None\n self.mobilities = []\n self.distances = []\n self.bandwidths0 = []\n self.bandwidthsI = []\n self.bandwidths = []\n self.intensities = []\n self.DNAspace_for_mu0 = logspace_int(100, 3000, 10)*ureg.bp # exponential space of DNA sizes\n self.DNAspace_for_vWBRfit = np.linspace(100, 50000, 100)*ureg.bp\n self.Tvals_for_mu0 = []\n self.H2Oviscosity = None\n self.accel_to_plateau = None\n self.equil_to_accel = None\n self.Zimm_to_Rouse = None\n self.poresize = None\n self.poresize_fit = None\n self.vWBR_muS = None\n self.vWBR_muL = None\n self.vWBR_gamma = None",
"def conv_build_type_nb_to_name(build_type):\n if build_type is None:\n msg = 'build_type is None. Going to return None for build_name.'\n warnings.warn(msg)\n return None\n\n dict_b_name = {\n 0: 'Residential',\n 1: 'Office (simulation)',\n 2: 'Main construction work',\n 3: 'Finishing trade construction work',\n 4: 'Bank and insurance',\n 5: 'Public institution',\n 6: 'Non profit organization',\n 7: 'Small office buildings',\n 8: 'Other services',\n 9: 'Metal',\n 10: 'Automobile',\n 11: 'Wood and timber',\n 12: 'Paper',\n 13: 'Small retailer for food',\n 14: 'Small retailer for non-food',\n 15: 'Large retailer for food',\n 16: 'Large retailer for non-food',\n 17: 'Primary school',\n 18: 'School for physically handicapped',\n 19: 'High school',\n 20: 'Trade school',\n 21: 'University',\n 22: 'Hotel',\n 23: 'Restaurant',\n 24: 'Childrens home',\n 25: 'Backery',\n 26: 'Butcher',\n 27: 'Laundry',\n 28: 'Farm primary agriculture ',\n 29: 'Farm with 10 - 49 cattle units',\n 30: 'Farm with 50 - 100 cattle units',\n 31: 'Farm with more than 100 cattle units',\n 32: 'Gardening',\n 33: 'Hospital',\n 34: 'Library',\n 35: 'Prison',\n 36: 'Cinema',\n 37: 'Theater',\n 38: 'Parish hall',\n 39: 'Sports hall',\n 40: 'Multi purpose hall',\n 41: 'Swimming hall',\n 42: 'Club house',\n 43: 'Fitness studio',\n 44: 'Train station smaller 5000m2',\n 45: 'Train station equal to or larger than 5000m2'\n }\n\n return dict_b_name[build_type]",
"def guess_part_type(self, data):\n if 'administrativEnhet' in data or 'saksbehandler' in data:\n typename = 'intern'\n elif 'kontaktperson' in data \\\n or -1 != data['navn'].find(' AS'):\n typename = 'enhet'\n else:\n typename = 'person'\n return typename",
"def makeNonStereoName(molType, name, n=None):\n\n match = re.match('(\\w+)(\\d|\\'+)(\\D*)', name)\n \n if not match:\n #print molType, name, n\n return \n \n \n letters = match.group(1)\n number = match.group(2)\n prime = ''\n \n if number == '\\'':\n number = 1\n prime = '\\''\n elif number == '\\'\\'':\n number = 2\n prime = '\\''\n \n if n is None:\n n = int(number) - 1\n\n name = letters + prime + chr(ord('a')+n)+ match.group(3)\n \n return name",
"def get_plates_needed(self):\n\n Complete = [d[-18:-14] for d in self.SAVED_FILES]\n Total = [d[-23:-19] for d in self.SPECTRA_FILES]\n Needed_idx = [i for i, x in enumerate(Total) if x not in Complete]\n self.SPECTRA = [self.SPECTRA_FILES[x] for x in Needed_idx]\n print('Will be analyzing %d plate files' % len(Needed_idx))"
]
| [
"0.65725857",
"0.5870212",
"0.5793835",
"0.5738888",
"0.56977445",
"0.555273",
"0.55162096",
"0.5514364",
"0.5499766",
"0.537909",
"0.5353239",
"0.5325758",
"0.5300923",
"0.5287215",
"0.5242675",
"0.52093667",
"0.5150269",
"0.51234543",
"0.51130295",
"0.50905144",
"0.5077052",
"0.50770426",
"0.5072851",
"0.50723326",
"0.5055344",
"0.5051423",
"0.5048792",
"0.5043259",
"0.50409365",
"0.5032802"
]
| 0.73173416 | 0 |
Cubic interpolation. Compute the coefficients of the polynomial interpolating the points (xi[i],yi[i]) for i = 0,1,2,3. Returns c, an array containing the coefficients of p(x) = c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3. | def cubic_interp(xi,yi):
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have length 3"
assert len(xi)==4 and len(yi)==4, error_message
error_message = "it is not possible to have more than one point in the with the same xi"
assert (len(np.unique(xi)) == len(xi)), error_message
# Set up linear system to interpolate through data points:
A = np.array([[1, 1, 1, 1], xi, xi**2, xi**3]).T
b = yi
c = solve(A,b)
return c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cubic_interp(x,y,xi) :\n \n f = interp1d(x,y,kind='cubic')\n yi = f(xi)\n \n return yi",
"def cubic_interpol(X_P, Y_P):\r\n y_derivs = derivatives( X_P, Y_P ).flatten() # flatten as FB_sub returns 2d array\r\n \r\n for j in np.arange( X_P.shape[0] - 1 ): # for every x[i] and x[i+1] pair\r\n plot_points = np.linspace( X_P[j], X_P[j+1], 20) # points to plot in the interval\r\n params = [ X_P[j], X_P[j+1], Y_P[j], Y_P[j+1],\r\n y_derivs[j], y_derivs[j+1]]\r\n f_points = f(plot_points, params)\r\n plt.plot(plot_points, f_points, 'b-', ms = .5, label = 'Cubic'if j==0 else \"\") # only label one plot\r",
"def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()",
"def cubicSpline(x,y,x_int):\n\n #region \"learn\" the coefficients of the cubic polynomials that interpolate intervals in x.\n # amount of intervals/splines\n n = len(x)-1\n\n # a_i = y_i\n a = y[:-1]\n\n # h_i = x_{i+1} - x_i for i in 0..n-1\n h = x[1:]-x[:-1]\n\n # 2 * h_i + h_{i+1}\n diagA = 2*(h[1:]+h[:-1])\n \n # h_1..h_n-2\n hInA = h[1:-1]\n\n A = np.eye(n-1)*diagA\n # distribute h_1..h_n-2 above and underneath the diagonal\n A += np.diag(hInA,1)\n A += np.diag(hInA,-1)\n\n # construct RHS\n z = 3/h[1:] * (y[2:] - y[1:-1]) - 3/h[:-1] * (y[1:-1] - y[:-2])\n\n # c_0 = c_{n} = 0\n c = np.zeros(n+1)\n\n c[1:-1] = np.linalg.solve(A,z)\n \n b = (y[1:]-y[:-1])/h - h/3*(c[1:] + 2*c[:-1])\n\n d = 1/(3*h)*(c[1:]-c[:-1])\n #endregion\n\n #region interpolate all points in x_int\n y_int = x_int.copy()\n # for all intervals\n for i in range(len(x)-1):\n # find points to interpolate within given interval\n idx = np.where(np.logical_and(x[i]<= x_int,x_int < x[i+1]))[0]\n xx = x_int[idx]\n yy = np.polyval(np.array([d[i],c[i],b[i],a[i]]), xx-x[i])\n y_int[idx] = yy\n print(f'interpolating in interval [{x[i]},{x[i+1]}[')\n print(xx)\n print(yy)\n print('\\n')\n\n # edgecase where x_int contains exactly last interval border\n #find indicies if x_int contains dupes\n idx = np.where(x_int == x[len(x)-1])[0] \n # interpolate with last interval polynomial\n i = len(a)-1\n y_int[idx] = np.polyval(np.array([d[i],c[i],b[i],a[i]]), x_int[idx]-x[i])\n #endregion\n return y_int",
"def interpolateCubic( t):\n curframe = []\n frame = np.searchsorted( keytime, t, side='right') - 1\n\n for i in range(11):\n poly = S[i]\n res = poly[frame](t)\n curframe.append(res)\n\n return curframe",
"def interp_spline(x0, x, y, z):\n\n size = len(x)\n\n # find index\n index = np.asarray(x.searchsorted(x0), dtype=bool)\n np.clip(index, 1, size - 1, index)\n\n xi1, xi0 = x[index], x[index - 1]\n yi1, yi0 = y[index], y[index - 1]\n zi1, zi0 = z[index], z[index - 1]\n hi1 = xi1 - xi0\n # print(xi0, xi1, yi0, yi1, zi0, zi1)\n\n # calculate cubic\n f0 = zi0 / (6 * hi1) * (xi1 - x0) ** 3 + zi1 / (6 * hi1) * (x0 - xi0) ** 3 + (yi1 / hi1 - zi1 * hi1 / 6) * (\n x0 - xi0) + (yi0 / hi1 - zi0 * hi1 / 6) * (xi1 - x0)\n\n return f0",
"def cubic_interpolation(robot_joints, robot_num_joints):\n robot_joints = np.array(robot_joints)\n robot_joints_pos = np.reshape(robot_joints, (-1, robot_num_joints))\n x = list(range(0, len(robot_joints_pos)))\n return CubicSpline(x, robot_joints_pos)",
"def coeffients(x, y):\n\n # ensure floating point datatypes\n x.astype(float)\n y.astype(float)\n\n # degree of interpolating polynomial\n n = len(x)\n\n # intitilize list of coeffients for interpolating polynomial to y values\n c = y.tolist()\n\n # compute coeffients\n for j in range(1, n):\n for i in range(n-1, j-1, -1):\n c[i] = float(c[i]-c[i-1])/float(x[i]-x[i-j])\n\n # return an array of polynomial coefficient, note: reverse order for np.polyval function\n return np.array(c[::-1])",
"def test_cubic():\n xi = np.array([-1., 0., 2., 4.])\n yi = np.array([ 1., -1., 7., 6.])\n \n c = quad_interp(xi,yi)\n c_true = np.array([-1. , 1.25 , 2.625, -0.625])\n \n print(\"c = \", c)\n print(\"c_true = \", c_true)\n \n # test that all elements have small error:\n assert np.allclose(c, c_true), \\\n \"Incorrect result, c = %s, Expected: c = %s\" % (c,c_true)",
"def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)",
"def cubic_spline(double[:] x):\n cdef:\n cnp.npy_intp i\n cnp.npy_intp n = x.shape[0]\n double[:] sx = np.zeros(n, dtype=np.float64)\n with nogil:\n for i in range(n):\n sx[i] = _cubic_spline(x[i])\n return np.asarray(sx)",
"def interpolateCubicPeriodic() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th parameter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolatePeriodicSpline(keytime, y)\n S.append(interpolants)\n return S",
"def spline_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tD = d_coefficients(x1,x2,x3,C)\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,C,D)\n\tA = a_coefficients(y1,y2)\n\treturn(A,B,C[:2],D)",
"def C(i, x):\n if i == 1:\n return np.array([[1., 0., 0.],\n [0., cos(x), sin(x)],\n [0., -sin(x), cos(x)]])\n elif i == 2:\n return np.array([[cos(x), 0., -sin(x)],\n [0., 1., 0.],\n [sin(x), 0., cos(x)]])\n elif i == 3:\n return np.array([[cos(x), sin(x), 0.],\n [-sin(x), cos(x), 0.],\n [0., 0., 1.]])",
"def _pchip_coeffs_i(X, Y, i):\n\n # Pre-assign sizes for PCHIP variables.\n h = [0.0, 0.0, 0.0]\n δ = [0.0, 0.0, 0.0]\n d = [0.0, 0.0]\n\n # Check whether x is adjacent to the start or end of this X\n at_start = (i == 0) or np.isnan(X[i - 1] + Y[i - 1])\n at_end = (i == len(X) - 2) or np.isnan(X[i + 2] + Y[i + 2])\n\n if at_start and at_end:\n\n # if np.isnan(X[i + 1]) or np.isnan(Y[i + 1]):\n # # Only one valid data point. Leave the interpolant as NaN.\n # d[0], c, b = np.nan, np.nan, np.nan\n\n # else:\n\n # ||| X[0] <= x <= X[1] ||| Revert to Linear Interpolation\n # If actually only one non-NaN data point, then d[0] will be NaN, so\n # interpolant will evaluate to NaN.\n d[0] = (Y[i + 1] - Y[i]) / (X[i + 1] - X[i])\n C3, C2 = 0.0, 0.0\n\n else:\n if at_start:\n # ||| X[0] <= x <= X[1] < X[2] --->\n h[1] = X[i + 1] - X[i]\n h[2] = X[i + 2] - X[i + 1]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Noncentered, shape-preserving, three-point formula:\n d[0] = ((2.0 * h[1] + h[2]) * δ[1] - h[1] * δ[2]) / (h[1] + h[2])\n if np.sign(d[0]) != np.sign(δ[1]):\n d[0] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[2])) and (\n np.abs(d[0]) > np.abs(3.0 * δ[1])\n ):\n d[0] = 3.0 * δ[1]\n\n # Standard PCHIP formula\n if np.sign(δ[1]) * np.sign(δ[2]) > 0.0:\n w1 = 2.0 * h[2] + h[1]\n w2 = h[2] + 2.0 * h[1]\n d[1] = (w1 + w2) / (w1 / δ[1] + w2 / δ[2])\n else:\n d[1] = 0.0\n\n elif at_end:\n # <--- X[i-1] < X[i] < x <= X[i+1] |||\n h[0] = X[i] - X[i - 1]\n h[1] = X[i + 1] - X[i]\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n\n # Standard PCHIP formula\n if np.sign(δ[0]) * np.sign(δ[1]) > 0.0:\n w1 = 2.0 * h[1] + h[0]\n w2 = h[1] + 2.0 * h[0]\n d[0] = (w1 + w2) / (w1 / δ[0] + w2 / δ[1])\n else:\n d[0] = 0.0\n\n # Noncentered, shape-preserving, three-point formula:\n d[1] = ((h[0] + 2.0 * h[1]) * δ[1] - h[1] * δ[0]) / (h[0] + h[1])\n if np.sign(d[1]) != np.sign(δ[1]):\n d[1] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[0])) and (\n np.abs(d[1]) > np.abs(3 * δ[1])\n ):\n\n d[1] = 3.0 * δ[1]\n\n else:\n # <--- X[i-1] < X[i] < x <= X[i+1] < X[i+2] --->\n h[0] = X[i] - X[i - 1] # Way faster to do this\n h[1] = X[i + 1] - X[i] # than\n h[2] = X[i + 2] - X[i + 1] # diff(X(i-1:i+3))\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Standard PCHIP formula\n for j in range(2):\n if np.sign(δ[j]) * np.sign(δ[j + 1]) > 0.0:\n w1 = 2.0 * h[j + 1] + h[j]\n w2 = h[j + 1] + 2.0 * h[j]\n d[j] = (w1 + w2) / (w1 / δ[j] + w2 / δ[j + 1])\n else:\n d[j] = 0.0\n\n # Polynomial coefficients for this piece\n dzzdx = (δ[1] - d[0]) / h[1]\n dzdxdx = (d[1] - δ[1]) / h[1]\n C3 = (dzdxdx - dzzdx) / h[1] # coeff of the 3rd degree term (x^3)\n C2 = 2 * dzzdx - dzdxdx # coeff of 2nd degree term (x^2)\n\n # The following code evaluates the `d`'th deriviative of the cubic\n # interpolant at `x`.\n # s = x - X[i]\n # if d == 0:\n # y = Y[i] + s * (d[0] + s * (C2 + s * C3))\n # elif d == 1: # first derivative\n # y = d[0] + s * (2 * C2 + 3 * s * C3)\n # elif d == 2: # second derivative\n # y = 2 * C2 + 6 * s * C3\n # elif d == 3: # third derivative\n # y = 6 * C3\n # else:\n # y = 0.0\n # return y\n\n # Faster to return tuple than build an np.array just to deconstruct it later\n return C3, C2, d[0], Y[i]",
"def cubic(cls, a):\r\n return cls.from_parameters(a, a, a, 90, 90, 90)",
"def coefficients(k, xi, x):\n\n import pyweno.cnonuniform\n\n x = np.asarray(x, np.float64)\n xi = np.asarray(xi, np.float64)\n\n nc = len(x) - 1\n n = len(xi)\n c = np.zeros((nc, n, k, k), np.float64)\n beta = np.zeros((nc, k, k, k), np.float64)\n varpi = np.zeros((nc, n, k), np.float64)\n\n pyweno.cnonuniform.nonuniform_coeffs(k, xi, x, c, beta, varpi)\n\n return c, beta, varpi",
"def coef_approximation(p, order):\n\n\t# maintain parity of order +1\n\tn = 50 + order +1\n\tr = 1\n\txs = np.linspace(-r, r, num=n)\n\tys = p(xs)\n\n\t# [TODO]: fix coeffients method\n\t# replace with 'c = coeffients(xs, ys)'\n\tdegree = n \n\tc = np.polyfit(xs,ys,degree)\n\n\treturn c",
"def cspline_params(self):\n b = np.zeros(self.n)\n c = np.zeros(self.n-1)\n d = np.zeros(self.n-1)\n B = np.zeros(self.n)\n Q = np.ones(self.n-1)\n D = 2 * np.ones(self.n)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n\n # Fill B\n B[0] = 3 * p[0]\n for i in range(self.n-2):\n B[i+1] = 3 * (p[i] + p[i+1] * dx[i] / dx[i+1])\n B[-1] = 3 * p[-2]\n \n # Fill D\n for i in range(self.n-2):\n D[i+1] = 2 * dx[i] / dx[i+1] + 2\n\n # Fill Q\n for i in range(self.n-2):\n Q[i+1] = dx[i] / dx[i+1]\n\n # Gauss elimination\n for i in range(1, self.n):\n D[i] = D[i] - Q[i-1] / D[i-1]\n B[i] = B[i] - B[i-1] / D[i-1]\n\n # Back-substitution\n b[-1] = B[-1] / D[-1]\n list = range(self.n-1)\n for i in list[::-1]:\n b[i] = (B[i] - Q[i] * b[i+1]) / D[i]\n\n # Calculate c and d\n for i in range(self.n-1):\n c[i] = (3 * p[i] - 2 * b[i] - b[i+1]) / dx[i]\n d[i] = (b[i] + b[i+1] - 2 * p[i]) / dx[i]\n c[-1] = -3 * d[-1] * dx[-1]\n\n return b, c, d",
"def compCoeff_CGP(i, A, c, N):\n Ap = np.copy(A)\n out = c[i, 0] * np.eye(N)\n j = 1\n while j <= i:\n # compute A to the power p\n if j > 1:\n Ap = Ap.dot(A)\n\n # add to the polynome\n out += c[i, j] * Ap\n j += 1\n\n return out",
"def createCubicBezier(self):\n return _libsbml.Curve_createCubicBezier(self)",
"def nice_cubic_polynomial(p):\n tmp = \"\"\n if p[\"a\"] == 1:\n tmp += \" x^3\"\n elif p[\"a\"] != 0:\n tmp += \"%.2fx^3\" % p[\"a\"]\n if p[\"b\"] == 1:\n tmp += \"\\t+ x^2\"\n elif p[\"b\"] != 0:\n tmp += \"\\t+ %.2fx^2\" % p[\"b\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"c\"] == 1:\n tmp += \"\\t+ x\"\n elif p[\"c\"] != 0:\n tmp += \"\\t+ %.2fx\" % p[\"c\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"d\"] != 0:\n tmp += \"\\t+ %.2f\" % p[\"d\"]\n return tmp",
"def interpolateCubicNatural() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th paramter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolateSpline(keytime, y)\n S.append(interpolants)\n return S",
"def cubic_evolve(self,nt=1):\n #loop through time steps\n for l in range(nt):\n # temporary array\n y_temp = np.zeros(self.y.shape[0])\n # loop through array\n for i in range(self.y.shape[0]):\n # idx left to departure point\n x_dep = self.x[i]-self.u[i]*self.dt\n j = int(np.floor(x_dep/self.dx))\n # alpha\n a = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n # calculate next time step\n f = lambda x: x % self.y.shape[0] if x >= self.y.shape[0] else x\n y_temp[i] = - a * (1-a)*(2-a)/6 * self.y[f(j-1)]\n y_temp[i] += (1-a**2)*(2-a)/2 * self.y[f(j)]\n y_temp[i] += a*(1+a)*(2-a)/2 * self.y[f(j+1)]\n y_temp[i] -= a*(1-a**2)/6 * self.y[f(j+2)]\n self.y = np.copy(y_temp)\n return self.y",
"def splineval(x,C,X):\n m = len(X)\n i = findsubintervals(x,X) \n G = zeros(m)\n for j in range(m):\n k = i[j]\n t = X[j] - x[k]\n G[j]=C[k,:]* t**array([[0],[1],[2],[3]])\n return G",
"def poly_regression_cubic(X, Y, Xs_test, Ys_test):\n \n poly = PolynomialFeatures(degree = 3)\n X3 = poly.fit_transform(X)[:,1:]\n X3_test = []\n for X_test in Xs_test:\n X3_test.append(poly.fit_transform(X_test)[:,1:])\n mses = linear_regression(X3, Y, X3_test, Ys_test)\n return mses",
"def HermiteC1(x0,y0,y0p,x1,y1,y1p,color):\n x = np.linspace(x0,x1,100)\n t = (x-x0)/(x1-x0)\n y = y0 * H0(t) + y0p * (x1-x0) * H1(t) + y1p * (x1-x0) * H2(t) + y1 * H3(t)\n plt.plot(x,y,color,label='cubic interpolant')",
"def evaluate_spline_coeffs(self, i):\n width = self.bounds[i] - self.bounds[i - 1] # region size, lambda\n si = self.slopes[i]\n sim1 = self.slopes[i - 1]\n rsi = self.region_slopes[i]\n gim1 = self.gis[i - 1]\n\n a = (3.0 * (3.0 * si + sim1) - 12.0 * rsi) / (2.0 * width ** 3)\n b = (12.0 * rsi - 3.0 * (si + 3.0 * sim1)) / (2.0 * width ** 3)\n c = 2 * rsi - 0.5 * si - 0.5 * sim1\n d = gim1 - width * (0.5 * rsi - 0.125 * si - 0.375 * sim1)\n\n return a, b, c, d",
"def quad_interp(xi,yi):\n\n # check inputs and print error message if not valid:\n\n error_message = \"xi and yi should have type numpy.ndarray\"\n assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message\n\n error_message = \"xi and yi should have length 3\"\n assert len(xi)==3 and len(yi)==3, error_message\n\n error_message = \"it is not possible to have more than one point in the with the same xi\"\n assert (len(np.unique(xi)) == len(xi)), error_message\n\n # Set up linear system to interpolate through data points:\n\n A = np.array([[1, 1, 1], xi, xi**2]).T\n b = yi\n\n c = solve(A,b)\n \n return c",
"def slip_to_coefficients(x, y, a):\n partials = np.zeros((x.size, 3))\n partials[:, 0] = (x / a) * (9 * (x / a) / 8 - 3 / 4)\n partials[:, 1] = (1 - 3 * (x / a) / 2) * (1 + 3 * (x / a) / 2)\n partials[:, 2] = (x / a) * (9 * (x / a) / 8 + 3 / 4)\n coefficients = np.linalg.inv(partials) @ y\n return coefficients"
]
| [
"0.75038683",
"0.73429656",
"0.70901597",
"0.6669274",
"0.64942664",
"0.64865476",
"0.64687324",
"0.64412856",
"0.64355713",
"0.63988495",
"0.6367994",
"0.6366493",
"0.6226454",
"0.61955345",
"0.61307824",
"0.600796",
"0.59882486",
"0.59307075",
"0.58748794",
"0.58747196",
"0.5855656",
"0.58507895",
"0.58274955",
"0.58137196",
"0.58000946",
"0.5792851",
"0.578799",
"0.57761633",
"0.57537204",
"0.57240385"
]
| 0.8025091 | 0 |
Score conversions and redirect as specified by url params. Expects a 'continue' url parameter for the destination, and a 'conversion_name' url parameter for each conversion to score. | def get(self):
cont = self.request.get('continue', default_value='/')
# Check whether redirecting to an absolute or relative url
netloc = urlparse.urlsplit(cont).netloc
if netloc:
# Disallow absolute urls to prevent arbitrary open redirects
raise custom_exceptions.InvalidRedirectURLError(
"Redirecting to an absolute url is not allowed.")
conversion_names = self.request.get_all('conversion_name')
if len(conversion_names):
bingo(conversion_names)
self.redirect(_iri_to_uri(cont)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_conversion(queries, query, src, dst, val, currencies, wf):\n ####################################################################################################\n # Make the currency case insensitive\n ####################################################################################################\n if src:\n src = src.upper()\n if dst:\n dst = dst.upper()\n\n ####################################################################################################\n # Validate the currencies to check if its a currency or not\n ####################################################################################################\n if not validate_currencies(queries, query, src, dst, currencies, wf):\n return 100\n\n rate = search_rate(src, dst, wf)\n\n if rate == -1:\n wf.add_item('No exchange rate found for the especified currencies...', icon=ICON_ERROR)\n return 1\n\n ####################################################################################################\n # Gets the currency info\n ####################################################################################################\n src_currency_info = currencies[src]\n dst_currency_info = currencies[dst]\n\n cur_src_name = get_currency_name(src_currency_info)\n cur_dst_name = get_currency_name(dst_currency_info)\n\n cur_dst_symbol = str.decode(dst_currency_info['Simbol'], encoding='utf-8')\n flag_file_icon = wf.workflowfile('flags/{}'.format(dst_currency_info['Flag']))\n\n if not val:\n val = 1\n\n converted_rate = Decimal(val) * rate\n\n decimal_places = get_decimal_places_to_use(rate)\n\n fmt_converted_rate = format_result(wf, converted_rate, decimal_places)\n\n # module 1 will result in just the decimal part, if the decimal part is 0, then i'll show only 2 decimal places\n if (rate % Decimal(1)).compare(Decimal('0')) == 0:\n fmt_rate = format_result(wf, rate, 2)\n else:\n fmt_rate = format_result(wf, rate, decimal_places)\n\n title = cur_dst_symbol + ' ' + fmt_converted_rate\n sub_title = u'({}) -> ({}) with rate {} for query: {}'.format(cur_src_name, cur_dst_name, fmt_rate,\n ' '.join(query).upper())\n\n wf.add_item(title, sub_title, valid=True, arg=str(converted_rate), icon=flag_file_icon)\n\n ############################################################################################\n # Checks if an update is available, and add it to the output\n ############################################################################################\n if wf.update_available:\n handle_check_update(wf)\n\n return 0",
"def get_scores(self, params):\n ep = ENDPOINTS.GET_SCORES\n self._check_parameters(ep, params)\n url = self.base_url.format(ep.EXTENSION)\n url = self._extend_url(url, params)\n return self._process_url(url)",
"def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tself.scores = self.tf_score()\r\n\t\treturn self.scores",
"def convert():\r\n\r\n try:\r\n con_from = request.args[\"convert_from\"]\r\n con_to = request.args[\"convert_to\"]\r\n amount = Decimal(request.args[\"amount\"])\r\n\r\n converted_amt = c.convert(con_from, con_to, amount)\r\n display_amt = round(converted_amt, 2)\r\n\r\n return render_template(\"rate.html\", display_amt=display_amt, con_from=con_from, con_to=con_to)\r\n except:\r\n return redirect(url_for('error'))",
"def redirect(url):",
"def getCogsScoreByParameters(self, url_parameters:str):\n\n\n self.function += '?' + url_parameters\n\n result_get = GetRest(function = self.function).performRequest()\n return result_get",
"def score_matches(analysis_matches, args):\n\t\n\tscore_types = ('found_score', 'host_score', 'virus_score')\n\t\n\tfor match_type in analysis_matches.values():\n\t\tfor match in match_type:\n\t\t\t# if read crosses an integration\n\t\t\tif match['intID'] is not None:\n\t\t\t\t# if integration was found, and should have been found\n\t\t\t\tif match['found'] is True:\n\t\t\t\t\tmatch['found_score'] = 'tp'\n\t\t\t\t\t\n\t\t\t\t\t# check if integration was found in the correct place in the host\n\t\t\t\t\tif correct_pos(match, args, ref = 'host'):\n\t\t\t\t\t\tmatch['host_score'] = 'tp'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmatch['host_score'] = 'fp'\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t# check if integration was found in the correct virus\n\t\t\t\t\tif correct_pos(match, args, ref = 'virus'):\n\t\t\t\t\t\tmatch['virus_score'] = 'tp'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmatch['virus_score'] = 'fp'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tfor score_type in score_types:\n\t\t\t\t\t\tmatch[score_type] = 'fn'\n\t\t\t\n\t\t\t# if read doesn't cross integration\n\t\t\telse:\n\t\t\t\t# doesn't cross but is found - false positive\n\t\t\t\tif match['found'] is True:\n\t\t\t\t\t\tfor score_type in score_types:\n\t\t\t\t\t\t\tmatch[score_type] = 'fp'\n\t\t\t\t# doesn't cross and isn't found - true netgative\n\t\t\t\telse:\n\t\t\t\t\tfor score_type in score_types:\n\t\t\t\t\t\tmatch[score_type] = 'tn'\n\t\t\t\t\t\n\n\t\t\t\t\n\treturn analysis_matches",
"def _transit_to_scores(self, **kwargs):\n logging.debug(\"in _transit_to_scores\")\n handler = kwargs['handler']\n\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if not game:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Game for hangout %s not found\" % (self.hangout_id,)})\n return False\n if game.state != self.state_name:\n return False # not in 'voting' state\n game.state = 'scores'\n participants = self._calculate_scores(game)\n game.put()\n # send out the score info on the channels.\n # TODO: currently, the scores for this round are only recorded briefly,\n # as the code below will reset them as part of the setup for the\n # next round/game. Might want to change this.\n # TODO: should the broadcasting part be part of the handler logic or\n # the state transition logic?\n self._broadcast_scores(participants, game.key.id(), game.current_round)\n\n # We can now start a new round. This resets the card selection and vote\n # fields. If we've had N rounds, this is a new game instead. \n if game.current_round >= (config.ROUNDS_PER_GAME - 1):\n # if have reached the limit of rounds for a game,\n # then start new game using the participants of the current game\n self.start_new_game(participants)\n return True\n else:\n # otherwise, start new round in the current game\n logging.info(\"starting new round.\")\n game.start_new_round(participants)\n return True",
"def submit_urls(args):\n params = {\n 'api_key': API_KEY,\n 'url': args.get('url')\n }\n markdown = ''\n r = req('POST', SUB_API + 'samples', params=params)\n res = r.json()['data']\n markdown += tableToMarkdown('Threat Grid - URL Submission', res)\n results = CommandResults(\n readable_output=markdown,\n outputs_prefix='Threatgrid.SearchResult',\n outputs_key_field='Info',\n outputs=res\n )\n return results",
"def on_post(self, req, resp):\n try:\n data = json.loads(req.stream.read())\n except ValueError:\n error_response(resp, 'ERROR: could not parse JSON')\n return\n\n try:\n url = data['url'].encode('ascii', 'ignore')\n except KeyError:\n error_response(resp, 'ERROR: could not find \"url\" field in request')\n return\n\n try:\n links = [(lurl.encode('ascii', 'ignore'), score)\n for (lurl, score) in data.get('links', [])]\n cp = aduana.CrawledPage(url, links)\n cp.score = data.get('score', 0.0)\n\n content_hash = data.get('content_hash', None)\n if content_hash:\n cp.hash = int(content_hash)\n\n except TypeError as e:\n error_response(resp, 'ERROR: Incorrect data inside CrawledPage. ' + str(e))\n return\n\n self.scheduler.add(cp)\n resp.status = falcon.HTTP_201",
"def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tfor urlid in self.urlids:\r\n\t\t \tsql = \"select pagerank from pagelink where urlid=%d\" % urlid\r\n\t\t\tpr = self.cur.execute(sql).fetchone()[0]\r\n\t\t\tself.scores[urlid] = pr\r\n\t\treturn self.scores",
"def update_score(self, move_no=None, players=None, edge=None):\r\n if self.results_file is not None:\r\n row = edge.row\r\n col = edge.col\r\n pln = self.get_player_num()\r\n self.results_file.next_move(player=pln, row=row, col=col)\r\n \r\n if self.mw is None:\r\n return\r\n\r\n if not self.display_game:\r\n return\r\n \r\n if self.score_window is None:\r\n return\r\n \r\n self.score_window.update_score(move_no=move_no, players=players)",
"def handle_url(url, session, res):\n print(\"Parsing\", url, file=sys.stderr)\n try:\n data, baseUrl = getPageContent(url, session)\n except IOError as msg:\n print(\"ERROR:\", msg, file=sys.stderr)\n return\n for match in url_matcher.finditer(data):\n url = match.group(1)\n name = unescape(match.group(2))\n name = asciify(name.replace('&', 'And').replace('@', 'At'))\n name = capfirst(name)\n if name in exclude_comics:\n continue\n if contains_case_insensitive(res, name):\n # we cannot handle two comics that only differ in case\n print(\"INFO: skipping possible duplicate\", repr(name), file=sys.stderr)\n continue\n res[name] = url",
"def pagerank(self, limit=20):\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tself.all_scores[urlid] = 1.0\r\n\r\n\t\tfor i in range(limit):\r\n\t\t\tfor urlid in self.url_ids:\r\n\t\t\t\tscore = self.all_scores[urlid]\r\n\t\t\t\tfor fromid in self.from_ids[urlid]:\r\n\t\t\t\t\tscore += self.all_scores[fromid] / \\\r\n\t\t\t\t\t\t\t (len(self.from_ids[fromid])+len(self.to_ids[fromid]))\r\n\t\t\t\tscore *= 0.85\r\n\t\t\t\tscore += 0.15\r\n\t\t\t\tself.all_scores[urlid] = score\r\n\t\tself.save_pr()",
"def gonext():\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)",
"def conversion_rates(self, start_date, end_date, user_id=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/conversion_statistics'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def mal_url_score(self, g, w):\n if g.addresses_url and g.addresses_ip:\n addresses_url = g.addresses_url.split(' ')\n addresses_ip = g.addresses_ip.split(' ')\n url_and_ip = addresses_url + addresses_ip\n elif g.addresses_url and not g.addresses_ip:\n url_and_ip = g.addresses_url.split(' ')\n elif g.addresses_ip and not g.addresses_url:\n url_and_ip = g.addresses_ip.split(' ')\n else:\n url_and_ip = None\n\n scores = self.multi_score(url_and_ip, w.addresses_url, self.sim_address)\n\n return max(scores)",
"def url_scan(url):\n # Create the required data dictionary for URL/Scan\n api_data = {\n 'url': url\n }\n response = http_request(endpoint=URL_SCAN_API, data=api_data)\n\n if response.get('errorNo') == 1:\n url_threat_data = response.get('urlData').get('threatData')\n snx_ioc_cont = {\n 'Value': url,\n 'Type': 'Scanned URL',\n 'Verdict': url_threat_data.get('verdict'),\n 'ThreatStatus': url_threat_data.get('threatStatus'),\n 'ThreatType': url_threat_data.get('threatType'),\n 'ThreatName': url_threat_data.get('threatName'),\n 'FirstSeen': url_threat_data.get('firstSeen'),\n 'LastSeen': url_threat_data.get('lastSeen'),\n 'ScanID': response.get('urlData').get('scanId')\n }\n ec = {\n 'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont\n }\n md = '### SlashNext Phishing Incident Response - URL Scan\\n' \\\n '##### url = {}\\n' \\\n 'Your Url Scan request is submitted to the cloud and may take up-to 60 seconds to complete.\\n'\\\n 'Please check back later using \"slashnext-scan-report\" command with Scan ID = {} or running the same ' \\\n '\"slashnext-url-scan\" command one more time.'.format(url, response.get('urlData').get('scanId'))\n return_outputs(md, ec, response)\n elif response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def main():\n\targs = parse_command_line_options()\n\n\t# Load CSV export file containing all the crawl errors\n\tloaded_file = open(args.file, 'r')\n\n\t# Pop off CSV Legend - TODO\n\t# export_map_legend = next(loaded_file)\n\t# export_map_legend = re.sub('[^a-zA-Z0-9-_,*.]', '', export_map_legend)\n\t# export_map_legend = export_map_legend.split(',')\n\n\t# store file name for output later\n\tfile_name = os.path.basename(args.file).replace('.csv', '')\n\n\tlogging.info(\"Loaded CSV Export\")\n\n\t# Load CSV redirect mapping file\n\tredirects_map_file = open(args.redirect_map, 'r')\n\tredirects_dict = map_redirects(redirects_map_file)\n\n\t# Setup output variables\n\toutput = []\n\toutput.append(['/old/path','/new/path'])\n\toutput_file = os.path.join(\n\t\targs.output_location,\n\t\t\"wp_redirects_{}.csv\".format( file_name )\n\t)\n\n\t# Setup leftover variables\n\tleftovers = []\n\t# Add legend for original export to our leftovers array,\n\t# mismatches will be stored here later & we want to retain the structure.\n\t# TODO\n\t# leftovers.append(export_map_legend)\n\n\tleftovers_file = os.path.join(\n\t\targs.output_location,\n\t\t\"wp_redirects_nomatch_{}.csv\".format( file_name )\n\t)\n\n\t# Loop through each row\n\tfor row in loaded_file:\n\t\t# Parse row and generate new row for output\n\t\tline = row.split(',')\n\t\tprint(line)\n\n\t\t# Returns false when no redirect url is found in the dict, if found it returns the matched redirect URL from the dict\n\t\tredirect_url = get_redirect(line[0], redirects_dict)\n\n\t\t# If row doesn't fall into predefined category, store entire row in leftovers instead.\n\t\tif False == redirect_url:\n\t\t\tleftovers.append([ line[0] ])\n\t\t\tcontinue\n\n\t\tbad_url = '/' + line[0]\n\t\toutput.append([ bad_url, redirect_url ])\n\n\t# Close our open files\n\tloaded_file.close()\n\tredirects_map_file.close()\n\n\t# print(leftovers)\n\t# print(output)\n\n\t# Save WP output to CSV file\n\twith open(output_file, 'w', newline=\"\", encoding=\"utf-8-sig\") as file_handle:\n\t\tprint('creating: ', output_file)\n\t\tcsvwriter = csv.writer(file_handle)\n\t\tcsvwriter.writerows(output)\n\n\t# Save leftovers output\n\twith open(leftovers_file, 'w', newline=\"\", encoding=\"utf-8-sig\") as file_handle:\n\t\tprint('creating: ', leftovers_file)\n\t\tcsvwriter = csv.writer(file_handle)\n\t\tcsvwriter.writerows(leftovers)\n\n\tlogging.info(\"Task Complete\")",
"def score_web_content(self, type, content, url):\n multiplier = 1\n if \"wikipedia.org\" in url:\n multiplier = WIKI_MULTIPLIER\n if type == 0:\n if DEBUG:\n print(f\"Scoring {url}\")\n t = time.time()\n self.score_forwards(content, multiplier)\n # print(f\"Scorer: score_forwards took {time.time()-t}s\")\n t = time.time()\n self.fetch_sentences(content)\n # print(f\"Scorer: fetch_sentences took {time.time()-t}s\")\n # self.score_sentences(content, multiplier) # turns out to be rather useless",
"def transfers(league_name, league_id, start, stop):\r\n try:\r\n for i in range(start, stop + 1):\r\n league_transfers = []\r\n season_id = str(i)\r\n for window in ['e', 'i']:\r\n league_transfers.append(scrape_season_transfers(league_name, league_id, season_id, window))\r\n sleep(3)\r\n df = pd.concat(league_transfers)\r\n df = df[~df['Name'].isna()]\r\n df.reset_index(drop=True, inplace=True)\r\n export_csv(df, season_id, league_name, league_id)\r\n except TypeError:\r\n print(\"Make sure league parameters are STRINGS and years are INTEGERS.\")",
"def score():\n\n # Read files\n s1 = request.form.get(\"string1\")\n s2 = request.form.get(\"string2\")\n if not s1 or not s2:\n abort(400, \"missing strings\")\n\n # Score files\n matrix = distances(s1, s2)\n\n # Extract operations from table\n operations = []\n i, j = len(s1), len(s2)\n while True:\n _, operation = matrix[i][j]\n if not operation:\n break\n if operation == Operation.INSERTED:\n j -= 1\n elif operation == Operation.DELETED:\n i -= 1\n else:\n i -= 1\n j -= 1\n operations.append(operation)\n operations.reverse()\n\n # Maintain list of intermediate strings, operation, and descriptions\n transitions = [(s1, None, None)]\n i = 0\n\n # Apply each operation\n prev = s1\n for operation in operations:\n\n # Update string and description of operation\n if operation == Operation.INSERTED:\n s = (prev[:i], s2[i], prev[i:])\n description = f\"inserted '{s2[i]}'\"\n prev = prev[:i] + s2[i] + prev[i:]\n i += 1\n elif operation == Operation.DELETED:\n s = (prev[:i], prev[i], prev[i + 1:])\n description = f\"deleted '{prev[i]}'\"\n prev = prev[:i] + prev[i + 1:]\n elif prev[i] != s2[i]:\n s = (prev[:i], s2[i], prev[i + 1:])\n description = f\"substituted '{prev[i]}' with '{s2[i]}'\"\n prev = prev[:i] + s2[i] + prev[i + 1:]\n i += 1\n else:\n i += 1\n continue\n transitions.append((s, str(operation), description))\n transitions.append((s2, None, None))\n\n # Output comparison\n return render_template(\"score.html\", matrix=matrix, s1=s1, s2=s2, operations=transitions)",
"def get(self, request: Request):\n code = request.path_params['code']\n pp = db.get_or_404(Participant, code=code)\n label = request.query_params.get(otree.constants.participant_label)\n\n pp.initialize(label)\n\n first_url = pp._url_i_should_be_on()\n return RedirectResponse(first_url)",
"def play(strategy0, strategy1, score0=0, score1=0, goal=GOAL_SCORE):\n player = 0 # Which player is about to take a turn, 0 (first) or 1 (second)\n dice_swapped = False # Whether 4-sided dice have been swapped for 6-sided\n # BEGIN PROBLEM 6\n def game_end():\n return score0 >= goal or score1 >= goal\n\n def num_rolls_for_player():\n if player == 0:\n return strategy0(score0, score1)\n else:\n return strategy1(score1, score0)\n\n def opponent_score():\n if player == 0:\n return score1\n else:\n return score0\n\n def updated_scores(score):\n if player == 0:\n return score0 + score, score1\n else:\n return score0, score1 + score\n\n while not game_end():\n num_rolls = num_rolls_for_player()\n score = take_turn(num_rolls, opponent_score(), select_dice(dice_swapped))\n score0, score1 = updated_scores(score)\n if is_perfect_piggy(score): # Perfect Piggy rule\n dice_swapped = not dice_swapped\n if is_swap(score0, score1): # Swine Swap rule\n score0, score1 = score1, score0\n player = other(player)\n # END PROBLEM 6\n return score0, score1",
"def submit_curation(self, corpus_id, curations):\n logger.info('Submitting curations for corpus \"%s\"' % corpus_id)\n corpus = self.get_corpus(corpus_id, check_s3=True, use_cache=True)\n # Start tabulating the curation counts\n prior_counts = {}\n subtype_counts = {}\n # Take each curation from the input\n for uuid, correct in curations.items():\n # Save the curation in the corpus\n # TODO: handle already existing curation\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' % uuid)\n continue\n corpus.curations[uuid] = correct\n # Now take all the evidences of the statement and assume that\n # they follow the correctness of the curation and contribute to\n # counts for their sources\n for ev in stmt.evidence:\n # Make the index in the curation count list\n idx = 0 if correct else 1\n extraction_rule = ev.annotations.get('found_by')\n # If there is no extraction rule then we just score the source\n if not extraction_rule:\n try:\n prior_counts[ev.source_api][idx] += 1\n except KeyError:\n prior_counts[ev.source_api] = [0, 0]\n prior_counts[ev.source_api][idx] += 1\n # Otherwise we score the specific extraction rule\n else:\n try:\n subtype_counts[ev.source_api][extraction_rule][idx] \\\n += 1\n except KeyError:\n if ev.source_api not in subtype_counts:\n subtype_counts[ev.source_api] = {}\n subtype_counts[ev.source_api][extraction_rule] = [0, 0]\n subtype_counts[ev.source_api][extraction_rule][idx] \\\n += 1\n # Finally, we update the scorer with the new curation counts\n self.scorer.update_counts(prior_counts, subtype_counts)",
"def crawl_url_links(input_dict):\n\n extractor_name=input_dict.get('extractor','DefaultExtractor')\n import requests\n label=input_dict['label']\n urls,_, source, source_date=_process_input(input_dict['input'],False)\n\n\n docs=[]\n titles=[]\n for url in urls:\n print(url)\n try:\n r = requests.get(url)\n except ConnectionError:\n continue\n if r.status_code==200:\n html=r.text\n from boilerpipe.extract import Extractor\n extractor = Extractor(extractor=extractor_name, html=html)\n\n titles.append(url)\n text=''\n if label:\n text+='!'+label+'\\t'\n text+=extractor.getText()\n docs.append(text)\n\n\n corpus_date = str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime()))\n documents, labels = _process_adc(docs, False, label, titles)\n features = {\"Source\": source, \"SourceDate\": source_date, \"CorpusCreateDate\": corpus_date,\n \"Labels\": json.dumps([label]) if label else '[]'}\n\n return {\"adc\": DocumentCorpus(documents=documents, features=features)}",
"def process_turn(self):\n data = self.bot.on_turn({'map': self.encode_map(), 'player_num': PLAYER_ID})\n for action in data.get('ACTIONS', []):\n f = getattr(self, action['action_type'].lower(), lambda **k: None)\n f(**action)",
"def process_url(self, url):\n LogParser.increment_count(self.urls, url)",
"def url_shortner(self):",
"def play(strategy0, strategy1, score0=0, score1=0, goal=GOAL_SCORE):\n player = 0 # Which player is about to take a turn, 0 (first) or 1 (second)\n dice_swapped = False # Whether 4-sided dice have been swapped for 6-sided\n my_score = score0\n opponent_score = score1\n while(score0<goal and score1<goal): # 'While' loop that ends when game ends\n if(player == 0): # If it is Player0's turn...\n num_rolls = strategy0(my_score,opponent_score) # strategy for Player0 implemented\n if num_rolls == -1 and dice_swapped == False: # if strategy is Pork Chop, and current die is six sided\n my_score+=1\n dice_swapped = True\n elif num_rolls == -1 and dice_swapped == True: # if strategy is Pork Chop, and current die is four sided\n my_score+=1\n dice_swapped = False\n else: #if strategy is not Pork Chop\n dice = select_dice(my_score, opponent_score, dice_swapped)\n my_score += take_turn(num_rolls, opponent_score, dice)\n player = other(player)\n else: # If it is Player1's turn...\n num_rolls = strategy1(opponent_score,my_score)\n if num_rolls == -1 and dice_swapped == False:\n opponent_score+=1\n dice_swapped = True\n elif num_rolls == -1 and dice_swapped == True:\n opponent_score+=1\n dice_swapped = False\n else:\n dice = select_dice(opponent_score, my_score, dice_swapped)\n opponent_score = opponent_score + take_turn(strategy1(opponent_score, my_score), my_score, dice)\n player = other(player)\n if(my_score*2 == opponent_score or opponent_score*2 == my_score): #Swine Swap implementation via placeholders\n zerocounter = my_score\n onecounter = opponent_score\n score0 = onecounter\n my_score = onecounter\n score1 = zerocounter\n opponent_score = zerocounter\n else: #Final reassignments to original score variables before return statement\n score0 = my_score\n score1 = opponent_score\n# END PROBLEM 5\n return score0, score1"
]
| [
"0.53658974",
"0.49933642",
"0.46277124",
"0.45726877",
"0.4496522",
"0.44600248",
"0.44595665",
"0.43833953",
"0.4374575",
"0.43299955",
"0.43068346",
"0.42801183",
"0.42527068",
"0.42434984",
"0.42205715",
"0.42046285",
"0.41844088",
"0.41649348",
"0.41626805",
"0.4157128",
"0.41440368",
"0.41342804",
"0.41226843",
"0.4118028",
"0.41098315",
"0.4108742",
"0.40922093",
"0.4090942",
"0.40884727",
"0.40748146"
]
| 0.5252377 | 1 |
Fires on broadcast messages | def process_broadcast(data):
logger.info(f"Broadcast: {data}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_broadcast_message(self):\n\n typhoonae.websocket.broadcast_message('My broadcast message.')",
"def broadcast(self, message):\n self._send('broadcast', message)",
"def broadcast():\n # global receiving_message\n # if not receiving_message:\n router.broadcast(clients.copy(), json.dumps(current_state))",
"def onRegisterNetworkBroadcast(self):\n pass",
"def broadcast(self, clients, msg):\n self.server.broadcast(clients, msg)",
"def on_message(data):\n pass",
"def broadcast_event(self, name, sender, *args, **kwargs):\n for addon in self.connection_bridges[sender]:\n addon.receive_event(sender=sender, name=name, *args, **kwargs)",
"def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)",
"def broadcast(self, msg):\n asyncio.run_coroutine_threadsafe(self.coro.broadcast(msg), self._robot._event_loop)",
"def broadcast(self, message):\r\n for c in self.characters:\r\n c.notify(message)",
"def log_broadcast(bcast):\n logger.info(\"Broadcast received: {bcast}\")",
"def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)",
"def broadcast(self, session, params):\n session.set_status('running')\n FMT = self.udp_schema['format']\n FMT_LEN = struct.calcsize(FMT)\n UDP_PORT = self.udp['port']\n udp_data = []\n fields = self.udp_schema['fields']\n session.data = {}\n\n # BroadcastStreamControl instance.\n stream = self.acu_control.streams['main']\n\n class MonitorUDP(protocol.DatagramProtocol):\n def datagramReceived(self, data, src_addr):\n host, port = src_addr\n offset = 0\n while len(data) - offset >= FMT_LEN:\n d = struct.unpack(FMT, data[offset:offset + FMT_LEN])\n udp_data.append(d)\n offset += FMT_LEN\n\n handler = reactor.listenUDP(int(UDP_PORT), MonitorUDP())\n influx_data = {}\n influx_data['Time_bcast_influx'] = []\n for i in range(2, len(fields)):\n influx_data[fields[i].replace(' ', '_') + '_bcast_influx'] = []\n\n active = True\n last_packet_time = time.time()\n\n while session.status in ['running']:\n now = time.time()\n if len(udp_data) >= 200:\n if not active:\n self.log.info('UDP packets are being received.')\n active = True\n last_packet_time = now\n\n process_data = udp_data[:200]\n udp_data = udp_data[200:]\n for d in process_data:\n data_ctime = sh.timecode(d[0] + d[1] / sh.DAY)\n self.data['broadcast']['Time'] = data_ctime\n influx_data['Time_bcast_influx'].append(data_ctime)\n for i in range(2, len(d)):\n self.data['broadcast'][fields[i].replace(' ', '_')] = d[i]\n influx_data[fields[i].replace(' ', '_') + '_bcast_influx'].append(d[i])\n acu_udp_stream = {'timestamp': self.data['broadcast']['Time'],\n 'block_name': 'ACU_broadcast',\n 'data': self.data['broadcast']\n }\n self.agent.publish_to_feed('acu_udp_stream',\n acu_udp_stream, from_reactor=True)\n influx_means = {}\n for key in influx_data.keys():\n influx_means[key] = np.mean(influx_data[key])\n influx_data[key] = []\n acu_broadcast_influx = {'timestamp': influx_means['Time_bcast_influx'],\n 'block_name': 'ACU_bcast_influx',\n 'data': influx_means,\n }\n self.agent.publish_to_feed('acu_broadcast_influx', acu_broadcast_influx, from_reactor=True)\n sd = {}\n for ky in influx_means:\n sd[ky.split('_bcast_influx')[0]] = influx_means[ky]\n session.data.update(sd)\n else:\n # Consider logging an outage, attempting reconfig.\n if active and now - last_packet_time > 3:\n self.log.info('No UDP packets are being received.')\n active = False\n next_reconfig = time.time()\n if not active and params['auto_enable'] and next_reconfig <= time.time():\n self.log.info('Requesting UDP stream enable.')\n try:\n cfg, raw = yield stream.safe_enable()\n except Exception as err:\n self.log.info('Exception while trying to enable stream: {err}', err=err)\n next_reconfig += 60\n yield dsleep(1)\n\n yield dsleep(0.005)\n\n handler.stopListening()\n return True, 'Acquisition exited cleanly.'",
"def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))",
"def broadcast(self, message: str) -> int:\n\t\tsubs_notified_so_far = []\n\t\treceiver_count = 0\n\t\tfor event in self.events:\n\t\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\t\tif subscriber not in subs_notified_so_far:\n\t\t\t\t\tcallback(\"broadcast\", message)\n\t\t\t\t\tsubs_notified_so_far.append(subscriber)\n\t\t\t\t\treceiver_count += 1\n\t\treturn receiver_count",
"def broadcast(self, message, *args):\n\t\tmethod = getattr(self, message, None)\n\t\tif method:\n\t\t\tmethod(*args)",
"def broadcast(self,message_type,message):\n for socket in self.connections:\n if socket != self.server_socket:\n self.sendToSocket(socket,message_type,message)",
"def broadcast_event(self, event, data):\n for ctx in self.manager.all():\n self.pool.notify((ctx.on_event, (event, data), EMPTY_DICT))",
"def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)",
"def broadcast(self,msg, UDP=False):\n if DEBUG: print \"class GlabPythonManager, function: broadcast\"\n if DEBUG and len(msg) < 10000: print \"class GlabPythonManager, function: broadcast\"\n \n if UDP: \n self.multicast.protocol.send(msg)\n return\n \n for key, connection in self.connection_manager.default_websocket_connections.iteritems():\n try:\n pass\n self.connection_manager.send(msg,connection)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass \n \n '''\n for key, peer_server in self.connection_manager.peer_servers.iteritems():\n if not peer_server.ip == '10.1.1.112':\n continue\n try:\n self.connection_manager.send(msg,peer_server)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass\n '''\n \n \n for key, connection in self.listener.openConnections.iteritems():\n continue\n try:\n if DEBUG: print \"broadcasting to the protocol:\", connection.ConnectionUID\n connection.transport.write(msg)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass\n \n \n #for client in self.wsfactory.openConnections.keys():\n #self.wsfactory.openConnections[client].sendMessage(messagestring)",
"def send(self, event, message):\n pass",
"def broadcast(data):\n for client in CLIENTS:\n client.write_message(data)",
"def broadcast(self, msg_type, msg, t=5):\n return None",
"def broadcast(self, message):\n for s in self.connections:\n s.send(bytes(message, encoding='utf-8'))",
"def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass",
"def send_notification (event):\n Publisher.sendMessage (event)",
"def _broadcast(self, msg: str) -> None:\n from jesse.routes import router\n\n for r in router.routes:\n # skip self\n if r.strategy.id == self.id:\n continue\n\n if msg == 'route-open-position':\n r.strategy.on_route_open_position(self)\n elif msg == 'route-close-position':\n r.strategy.on_route_close_position(self)\n elif msg == 'route-increased-position':\n r.strategy.on_route_increased_position(self)\n elif msg == 'route-reduced-position':\n r.strategy.on_route_reduced_position(self)\n elif msg == 'route-canceled':\n r.strategy.on_route_canceled(self)\n\n r.strategy._detect_and_handle_entry_and_exit_modifications()",
"def EnableBroadcast(self) -> bool:",
"def EnableBroadcast(self) -> bool:",
"def broadcast(message):\n for client in CLIENTS:\n client.send(message)"
]
| [
"0.71514267",
"0.6888148",
"0.688351",
"0.6738276",
"0.66956115",
"0.6646559",
"0.663535",
"0.6584327",
"0.65600497",
"0.6499609",
"0.6485581",
"0.64688635",
"0.6445526",
"0.64237297",
"0.64032906",
"0.63955146",
"0.63819927",
"0.63773906",
"0.63745636",
"0.6364013",
"0.6323783",
"0.6318811",
"0.630907",
"0.62333894",
"0.6232286",
"0.6220827",
"0.6211069",
"0.62085193",
"0.62085193",
"0.61794883"
]
| 0.70121545 | 1 |
A function to get necessary labels for the waiting times of different thresholds | def get_plot_for_different_thresholds_labels(measurement_type):
if measurement_type == "w":
title = "Waiting times over different thresholds"
y_axis_label = "Waiting Time"
elif measurement_type == "b":
title = "Blocking times over different thresholds"
y_axis_label = "Blocking Time"
else:
title = "Waiting and blocking times over different thresholds"
y_axis_label = "Waiting and Blocking Time"
x_axis_label = "Capacity Threshold"
return (x_axis_label, y_axis_label, title) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def label(self, t):\n if self.labels is None:\n return None\n prev_label = None\n for l in self.labels:\n if l.time > t:\n break\n prev_label = l\n if prev_label is None:\n return None\n return prev_label.name",
"def assign_labels(data, holding_period=pd.Timedelta(hours=1),\\\n volatility_window=pd.Timedelta(hours=1), factors = [2, 2]):\n\n data = data.assign(tickDirection=_get_direction(data.close),\n closeLag1Hr=_get_ts_lag(data.close, lag=pd.Timedelta(hours=1)))\n data = data.assign(return1Hr=data.closeLag1Hr/data.close - 1).dropna()\n\n # add thresholds and vertical barrier (t1) columns\n data = data.assign(threshold=_get_volatility(data.close, delta=volatility_window), \n t1=_get_verticals(data, delta=holding_period)).dropna()\n\n # events are [t1, threshold, side]\n events = data[['t1', 'threshold']]\n events = events.assign(side=pd.Series(1., events.index)) # long only\n\n # get the timestamps for [t1, stop_loss, take_profit]\n touches = _get_horizontals(data, factors)\n # assign labels based on which barrier is hit first\n touches = _get_labels(touches)\n\n # add touches timestamps and label\n data = pd.concat( [data.loc[:, 'vwap':'threshold'], \n touches.loc[:, 't1':'label']], axis=1)\n \n return data",
"def add_labels(df, binary=True, DELAY_THRESHOLD=20, categorical=False):\n\n def delay_class(minutes):\n if minutes <= 5:\n return 0\n if 5 < minutes <= 20:\n return 1\n if 20 < minutes <= 60:\n return 2\n if 60 < minutes <= 120:\n return 3\n if 120 < minutes:\n return 4\n else:\n return None\n\n if binary and not categorical:\n # add the target label \"binary: delayed (positive) not-delayed (negative)\" based on the threshold in minutes\n df['DELAYED'] = df['DEP_DELAY'].apply(lambda x: 1 if x >= DELAY_THRESHOLD else 0)\n\n # balance the data (same number of samples for delayed / not delayed flights)\n delayed = df[df['DELAYED'] == 1].copy()\n no_delay = df[df['DELAYED'] == 0][:delayed.shape[0]].copy()\n\n # concat into one dateframe\n data = delayed.append(no_delay, ignore_index=True)\n # logging\n percentage = delayed_percentage(df, DELAY_THRESHOLD)\n print('{:.2f}% of the total flights were delayed {} minutes or more.'.format(percentage, DELAY_THRESHOLD))\n\n del delayed, no_delay, df # release some memory\n\n elif categorical:\n df['DELAY_CLASS'] = df['DEP_DELAY'].apply(lambda row: delay_class(row))\n counts = df['DELAY_CLASS'].value_counts()\n m = min(counts)\n c0 = df[df['DELAY_CLASS'] == 0][:m].copy()\n c1 = df[df['DELAY_CLASS'] == 1][:m].copy()\n c2 = df[df['DELAY_CLASS'] == 2][:m].copy()\n c3 = df[df['DELAY_CLASS'] == 3][:m].copy()\n c4 = df[df['DELAY_CLASS'] == 4][:m].copy()\n data = c0.append([c1, c2, c3, c4])\n data['DELAY_CLASS'] = data['DELAY_CLASS'].astype(int)\n del c0, c1, c2, c3, c4 # release memory\n else:\n raise('either of binary or categorical must be true')\n\n # shuffle dataframe\n data = data.sample(frac=1).reset_index(drop=True)\n\n return data",
"def qc_BeamLogs(beam_threshold):\n \n QC_BEAM_LABEL = []\n\n for i in range(0,36):\n b_threshold = beam_threshold[i] #index 0 corresponds to beam 26 as input threshold is sorted by beam position. \n if (b_threshold >= 0.05): \n QC_BEAM_LABEL.append('bad')\n elif (0.03 <= b_threshold < 0.05):\n QC_BEAM_LABEL.append('ok')\n else:\n QC_BEAM_LABEL.append('good')\n\n return QC_BEAM_LABEL",
"def find_labels(df_in, period):\n \n #create regression label\n \n #make a dataframe to hold the last cycle for each enginge in the dataset\n df_max_cycle = pd.DataFrame(df_in.groupby(['id.engine.id','id.maintenanceIndex'])['id.cycle'].max())\n df_max_cycle.reset_index(inplace=True)\n df_max_cycle.columns = ['id.engine.id','id.maintenanceIndex', 'lastCycle']\n \n #add time-to-failure ttf as a new column - regression label\n df_in = pd.merge(df_in, df_max_cycle, on=['id.engine.id','id.maintenanceIndex'])\n df_in['labels.ttf'] = df_in['lastCycle'] - df_in['id.cycle']\n #df_in.drop(['lastCycleReached'], axis=1, inplace=True)\n \n #create binary classification label\n df_in['labels.bnc'] = df_in['labels.ttf'].apply(lambda x: 1 if x <= period else 0)\n \n #create multi-class classification label\n df_in['labels.mcc'] = df_in['labels.ttf'].apply(lambda x: 2 if x <= period/2 else 1 if x <= period else 0)\n \n return df_in",
"def _label_rider_by_trip_frequency(self, rider):\n if rider['total_num_trips'] <= 5*self.duration:\n label = 0\n elif rider['total_num_trips'] <= 20*self.duration:\n label = 1\n elif rider['total_num_trips'] > 20*self.duration:\n label = 2\n else:\n label = -1\n return label",
"def get_times_and_labels(records, measurement_type):\n if measurement_type == \"w\":\n mean_time = [np.nanmean(w.waiting_times) for w in records]\n title = \"Distributions of waiting times over runtimes\"\n y_axis_label = \"Waiting Times\"\n else:\n mean_time = [np.nanmean(b.blocking_times) for b in records]\n title = \"Distributions of blocking times over runtimes\"\n y_axis_label = \"Blocking Times\"\n return mean_time, title, y_axis_label",
"def _get_tbd_result(self):\n return (self.threshold, self.num_intervals[0], self.num_intervals[1], self.num_intervals[2])",
"def scheduled_task_labels(self):\n tasks = self._get_decision_artifact('task-graph.json').values()\n return {t['label'] for t in tasks}",
"def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def label_lvl(a, thlds, labels):\n if len(labels) != len(thlds) + 1:\n raise ValueError(\"Must be one more label than number of thresholds\")\n lvl_indexes = index_lvl(a, thlds)\n return np.take(labels, lvl_indexes)",
"def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n from Hlt2Lines.Commissioning.Lines import CommissioningLines \n d.update({CommissioningLines :\n {'Prescale' : {'Hlt2PassThrough' : 0.0001,\n 'Hlt2Forward' : 0.00001,\n 'Hlt2DebugEvent' : 0.000001},\n 'Postscale' : {'Hlt2ErrorEvent' : 'RATE(0.01)'},\n # do not want debug events on lumi-exclusive Hlt1 events...\n 'DebugEvent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\"},\n 'ErrorEvent' : {'Priority' : 254,\n 'VoidFilter' : '',\n 'HLT2' : \"HLT_COUNT_ERRORBITS_RE('^Hlt2.*',0xffff) > 0\"},\n 'PassThrough' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'NoBiasPassThrough' : {'HLT1' : \"HLT_PASS('Hlt1NoBiasPrescaledDecision')\",\n 'VoidFilter' : ''},\n 'Transparent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(ODIN.*|L0.*|MB.*|BeamGas.*|Velo.*|NZS.*|Incident|Tell1Error|ErrorEvent)Decision$')\",\n 'VoidFilter' : ''},\n 'Lumi' : {'HLT1' : \"HLT_PASS_SUBSTR('Hlt1Lumi')\",\n 'VoidFilter' : ''},\n 'KS0_DD' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'KS0_LL' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'Turbo' : ['KS0_DD', 'KS0_LL']\n }}\n )\n return d",
"def get_best_thresholds(labels, test_y, outputs, plot=False):\n t_max = [0] * len(labels)\n f_max = [0] * len(labels)\n\n for i, label in enumerate(labels):\n ts = []\n fs = []\n\n for t in np.linspace(0.1, 0.99, num=50):\n p, r, f, _ = precision_recall_fscore_support(test_y[:,i], np.where(outputs[:,i]>t, 1, 0), average='micro')\n ts.append(t)\n fs.append(f)\n if f > f_max[i]:\n f_max[i] = f\n t_max[i] = t\n\n if plot:\n print(f'LABEL: {label}')\n print(f'f_max: {f_max[i]}')\n print(f't_max: {t_max[i]}')\n\n plt.scatter(ts, fs)\n plt.show()\n \n return t_max, f_max",
"def get_beat_label(labels):\n # calculate the count of each beat type in the frame\n beat_counts = np.bincount(labels, minlength=len(ds_beat_names))\n most_hp_beats = np.argmax(beat_counts[_HI_PRIO_BEATS])\n if beat_counts[_HI_PRIO_BEATS][most_hp_beats] > 0:\n y = _HI_PRIO_BEATS[most_hp_beats]\n else:\n most_lp_beats = np.argmax(beat_counts[_LO_PRIO_BEATS])\n # handle the case of no detected beats\n if beat_counts[_LO_PRIO_BEATS][most_lp_beats] > 0:\n y = _LO_PRIO_BEATS[most_lp_beats]\n else:\n y = 0 # undefined beat\n return y",
"def _get_labels(touches):\n \n out = touches.copy(deep=True)\n # pandas df.min() ignores NaN values\n first_touch = touches[['stop_loss', 'take_profit']].min(axis=1)\n for loc, t in first_touch.items():\n if pd.isnull(t):\n out.loc[loc, 'label'] = 0\n elif t == touches.loc[loc, 'stop_loss']:\n out.loc[loc, 'label'] = -1\n else:\n out.loc[loc, 'label'] = 1\n return out",
"def waiting_times(all_data):\n print('Computing waiting times')\n result = {'p': [], 'alpha': [], 'durations': []}\n for data in all_data:\n N = data['config']['N']\n p = data['config']['p']\n alpha = data['config']['alpha']\n print(f'p = {p}, alpha = {alpha}')\n\n # find dominant strategy at each point in time\n print(' > Finding dominant strategies')\n dom_strats = np.asarray(list(map(lambda e: get_dominant_strategy(e), data['snapshots'])))\n print(f' >> Found {np.unique(dom_strats).size} unique strategies')\n\n if np.unique(dom_strats).size <= 1:\n print(' >> Skipping')\n continue\n\n # detect dominant strategy changes (and durations)\n print(' > Computing durations')\n durations = get_domain_durations(dom_strats)\n durations /= N**2\n print(f' >> Found {durations.size} durations')\n\n # store result\n result['p'].extend([p]*len(durations))\n result['alpha'].extend([alpha]*len(durations))\n result['durations'].extend(durations)\n\n df = pd.DataFrame(result)\n\n # plot w-time distributions\n print(' > Plotting')\n for p in df['p'].unique():\n sub = df[df['p']==p]\n\n plt.figure()\n for alpha, group in sub.groupby(['alpha']):\n sns.distplot(\n group['durations'],\n kde=False, label=rf'$\\alpha={alpha}$')\n\n plt.title(rf'Distribution of waiting times ($p={p}$)')\n plt.xlabel(r'$\\Delta t$')\n plt.ylabel(r'count')\n plt.legend(loc='best')\n\n plt.savefig(f'images/waiting_times_p{p}.pdf')\n\n ## plot wtd dependence on parameters\n plt.figure()\n sns.boxplot(x='alpha', y='durations', hue='p', data=df)\n plt.savefig('images/waiting_times_vs_alpha.pdf')\n plt.close()\n\n return df",
"def get_tpr_from_threshold(scores,labels, threshold_list):\n tpr_list = []\n hack_scores = []\n for score, label in zip(scores,labels):\n if label == 1:\n hack_scores.append(float(score))\n hack_scores.sort(reverse=True)\n hack_nums = len(hack_scores)\n for threshold in threshold_list:\n hack_index = 0\n while hack_index < hack_nums:\n if hack_scores[hack_index] <= threshold:\n break\n else:\n hack_index += 1\n if hack_nums != 0:\n tpr = hack_index * 1.0 / hack_nums\n else:\n tpr = 0\n tpr_list.append(tpr)\n return tpr_list",
"def receiver_operation_curve(test_confidence, test_labels, label_range):\n\n test_confidence = np.array(test_confidence)\n\n # compute actual number of positive and negative instances\n num_instance = len(test_confidence)\n num_true_pos = sum(np.array([label_range[0] == test_labels[i] for i in range(num_instance)]))\n num_true_neg = num_instance - num_true_pos\n\n # for each threshold, compute the TP and FP\n ROC_array = []\n\n zipped = zip(test_confidence, test_labels)\n zipped.sort(key = lambda t: t[0]) # sort confidence and label based on confidence, ascending order\n zipped.reverse() # sort the confidence from high to low, descending order\n [test_confidence, test_labels] = zip(*zipped)\n\n # set cutoff at each point when the instance label changes\n cutoff = []\n cutoff.append(1)\n for i in range(num_instance):\n if i == 0:\n cutoff.append(test_confidence[0])\n current_state = test_labels[0]\n else:\n if current_state == test_labels[i]:\n continue\n else:\n current_state = test_labels[i]\n cutoff.append(test_confidence[i-1])\n cutoff.append(test_confidence[i])\n cutoff.append(0)\n\n for cf in cutoff:\n # compute true positive and false positive\n TP = 0\n FP = 0\n for i in range(num_instance):\n if test_confidence[i] < cf:\n break\n else:\n if label_range[0] == test_labels[i]:\n TP += 1\n elif label_range[0] != test_labels[i]:\n FP += 1\n TP_rate = 1.0 * TP / num_true_pos\n FP_rate = 1.0 * FP / num_true_neg\n ROC_array.append([FP_rate, TP_rate])\n\n return ROC_array",
"def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }",
"def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n\n from Hlt2Lines.SingleMuon.Lines import SingleMuonLines\n d.update({SingleMuonLines : \n {'Prescale' : {\"Hlt2SingleMuon\" : 0.5, \n \"Hlt2SingleMuonLowPT\" : 0.1},\n 'HltReq' : {\"SingleMuon\" : \"HLT_PASS_RE('Hlt1TrackMuonDecision')\"},\n 'Common' : {'TrChi2' : 3, # Adimensional\n 'Pt' : 1300 * MeV },\n 'SingleMuon' : {'IP' : 0.0 * mm,\n 'IPChi2' : 16 }, # Adimensional\n 'HighPT' : {'HighPt' : 10000 *MeV },\n 'LowPT' : { 'HighPt' : 4800 * MeV }\n }\n })\n \n return d",
"def Thresholds(self) :\n\n from Hlt1Lines.Hlt1TrackLines import Hlt1TrackLinesConf\n from Hlt1Lines.Hlt1MuonLines import Hlt1MuonLinesConf\n from Hlt1Lines.Hlt1ElectronLines import Hlt1ElectronLinesConf\n from Hlt1Lines.Hlt1L0Lines import Hlt1L0LinesConf\n from Hlt1Lines.Hlt1MBLines import Hlt1MBLinesConf\n from Hlt1Lines.Hlt1CommissioningLines import Hlt1CommissioningLinesConf\n from Hlt1Lines.Hlt1DisplVertexLines import Hlt1DisplVertexLinesConf\n from Hlt2Lines.Hlt2CommissioningLines import Hlt2CommissioningLinesConf\n from Hlt1Lines.Hlt1BeamGasLines import Hlt1BeamGasLinesConf\n from Hlt2Lines.Hlt2diphotonDiMuonLines import Hlt2diphotonDiMuonLinesConf\n from Hlt2Lines.Hlt2InclusiveDiProtonLines import Hlt2InclusiveDiProtonLinesConf\n from Hlt2Lines.Hlt2DisplVerticesLines import Hlt2DisplVerticesLinesConf\n\n thresholds = { Hlt1TrackLinesConf : { 'AllL0Tight_PT' : 1700\n , 'AllL0Tight_P' : 3000\n , 'AllL0Tight_IP' : 0.100\n , 'AllL0Tight_IPChi2' : 16 \n , 'AllL0Tight_TrChi2' : 1.5 \n , 'AllL0Tight_GEC' : 'Loose'\n , 'AllL0Tight_Velo_NHits' : 9 \n , 'AllL0Tight_Velo_Qcut' : 3 \n , 'AllL0Tight_ValidateTT' : True \n , 'AllL0_PT' : 1600\n , 'AllL0_P' : 3000\n , 'AllL0_IP' : 0.100\n , 'AllL0_IPChi2' : 16\n , 'AllL0_TrChi2' : 2.0\n , 'AllL0_GEC' : 'Loose'\n , 'AllL0_Velo_NHits' : 9 \n , 'AllL0_Velo_Qcut' : 3 \n , 'AllL0_ValidateTT' : True \n , 'Muon_PT' : 1000 \n , 'Muon_P' : 3000 \n , 'Muon_IP' : 0.100\n , 'Muon_IPChi2' : 16\n , 'Muon_TrChi2' : 2.5 \n , 'Muon_GEC' : 'Loose'\n , 'Muon_ValidateTT' : False\n , 'Muon_L0Channels' : 'Muon,DiMuon,MuonNoSPD,DiMuonNoSPD' \n , 'Photon_PT' : 1200\n , 'Photon_P' : 3000\n , 'Photon_IP' : 0.100\n , 'Photon_IPChi2' : 16\n , 'Photon_TrChi2' : 2.0\n , 'Photon_L0Channels' : 'PhotonHi,ElectronHi' \n , 'Photon_GEC' : 'Loose'\n , 'Photon_ValidateTT' : True\n , 'Prescale' : {'Hlt1TrackAllL0' : 1.0, \n 'Hlt1TrackAllL0Tight' : 1.0,\n 'Hlt1TrackForwardPassThrough' : 0,\n 'Hlt1TrackForwardPassThroughLoose' : 0}\n \n }\n , Hlt1ElectronLinesConf : { 'SingleElectronNoIP_P' : 20000\n , 'SingleElectronNoIP_PT' : 10000\n , 'SingleElectronNoIP_TrChi2' : 3\n , 'SingleElectronNoIP_TrNTHits' : 0 #OFF\n , 'SingleElectronNoIP_Velo_NHits' : 0 #OFF\n , 'SingleElectronNoIP_Velo_Qcut' : 999 #OFF\n , 'SingleElectronNoIP_GEC' : 'Loose'\n , 'L0Channels': { 'SingleElectronNoIP' : ( 'Electron', ) }\n }\n , Hlt1MuonLinesConf : { 'SingleMuonHighPT_P' : 3000\n , 'SingleMuonHighPT_PT' : 4800\n , 'SingleMuonHighPT_TrChi2' : 3.\n , 'SingleMuonHighPT_GEC' : 'Loose'\n , 'SingleMuonNoIP_P' : 3000\n , 'SingleMuonNoIP_PT' : 1300\n , 'SingleMuonNoIP_TrChi2' : 3.\n , 'SingleMuonNoIP_GEC' : 'Loose'\n , 'DiMuonLowMass_VxDOCA' : 0.2\n , 'DiMuonLowMass_VxChi2' : 25\n , 'DiMuonLowMass_P' : 0\n , 'DiMuonLowMass_PT' : 0\n , 'DiMuonLowMass_TrChi2' : 3\n , 'DiMuonLowMass_M' : 0.\n , 'DiMuonLowMass_IPChi2' : 6.\n , 'DiMuonLowMass_GEC' : 'Loose'\n , 'DiMuonHighMass_VxDOCA' : 0.2\n , 'DiMuonHighMass_VxChi2' : 25\n , 'DiMuonHighMass_P' : 3000\n , 'DiMuonHighMass_PT' : 500\n , 'DiMuonHighMass_TrChi2' : 3\n , 'DiMuonHighMass_M' : 2700\n , 'DiMuonHighMass_GEC' : 'Loose'\n , 'MultiMuonNoIP_P' : 3000\n , 'MultiMuonNoIP_PT' : 500\n , 'MultiMuonNoIP_TrChi2' : 3.\n , 'MultiMuonNoIP_GT' : 2.5\n , 'MultiMuonNoIP_GEC' : 'Loose'\n ,'L0Channels' : {\n 'SingleMuonHighPT' : ( 'Muon', 'MuonNoSPD'),\n 'SingleMuonNoIP' : ( 'Muon', 'MuonNoSPD'),\n 'DiMuonLowMass' : ( 'Muon', 'MuonNoSPD', 'DiMuon', 'DiMuonNoSPD' ),\n 'DiMuonHighMass' : ( 'Muon', 'MuonNoSPD', 'DiMuon', 'DiMuonNoSPD' ),\n 'MultiMuonNoIP' : ( 'Muon', 'MuonNoSPD', 
'DiMuon', 'DiMuonNoSPD' ) }\n\n , 'Prescale' : { 'Hlt1SingleMuonNoIP' : 0.01,\n 'Hlt1MultiMuonNoIP' : 0.0 }\n }\n , Hlt1L0LinesConf : { 'Postscale' : { 'Hlt1L0AnyRateLimited' : 'RATE(1)'\n , 'Hlt1L0AnyNoSPDRateLimited' : 'RATE(1)'\n }\n , 'Prescale' : { 'Hlt1L0HighSumETJet' : 1 \n , 'Hlt1L0AnyNoSPD' : 0.01\n }\n }\n , Hlt1BeamGasLinesConf : {\n 'Prescale' : { 'Hlt1BeamGasCrossingForcedRecoFullZ': 0.001 }\n , 'Postscale' : { 'Hlt1BeamGasNoBeamBeam1' : 'RATE(0.5)'\n , 'Hlt1BeamGasNoBeamBeam2' : 'RATE(0.5)'\n , 'Hlt1BeamGasBeam1' : 'RATE(2)'\n , 'Hlt1BeamGasBeam2' : 'RATE(2)'\n , 'Hlt1BeamGasCrossingEnhancedBeam1' : 'RATE(0)'\n , 'Hlt1BeamGasCrossingEnhancedBeam2' : 'RATE(0)'\n , 'Hlt1BeamGasCrossingForcedReco' : 'RATE(0.5)'\n , 'Hlt1BeamGasCrossingForcedRecoFullZ': 'RATE(0.5)'\n , 'Hlt1BeamGasCrossingParasitic' : 'RATE(1)'\n , 'Hlt1BeamGasHighRhoVertices' : 'RATE(4)'\n }\n }\n , Hlt1DisplVertexLinesConf: { \"VertexDisplVertex_DOCABL\" : \"2.0*mm\"\n , \"VertexDisplVertex_VELO_NSP\" : \"3\"\n , \"VertexDisplVertex_VELO_NCSP\" : \"2\"\n , \"VertexDisplVertex_VELOTrChi2\": \"2.5\"\n , \"VertexDisplVertex_VX_DOCA\" : \"0.3*mm\"\n , \"VertexDisplVertex_VX_CHI2\" : \"1000000000.0\"\n , \"VertexDisplVertex_VX_RHO\" : \"12.0*mm\"\n , \"VertexDisplVertex_Tr_P\" : \"10.0*GeV\"\n , \"VertexDisplVertex_Tr_PT\" : \"1.7*GeV\"\n , \"VertexDisplVertex_Tr_CHI2\" : \"2.5\"\n , \"VertexDisplVertex_GEC\" : \"Loose\"\n , \"Prescale\" : {'Hlt1VertexDisplVertex':1.0 }\n }\n #, Hlt1ProtonLinesConf : { 'Prescale' : { 'Hlt1DiProtonLowMult' : 0.01, \n # 'Hlt1DiProton' : 0.01\n # } } \n , Hlt1CommissioningLinesConf : { 'Postscale' : { 'Hlt1ErrorEvent' : 'RATE(0.01)'\n\n } }\n , Hlt2CommissioningLinesConf : { 'Prescale' : { 'Hlt2PassThrough' : 0.0001 \n , 'Hlt2Forward' : 0.00001\n , 'Hlt2DebugEvent' : 0.000001 }\n , 'Postscale' : { 'Hlt2ErrorEvent' : 'RATE(0.01)' } }\n # micro bias lines switched off for high mu physics running \n , Hlt1MBLinesConf : { 'Prescale' : { 'Hlt1MBMicroBiasVelo' : 0\n , 'Hlt1MBMicroBiasTStation' : 0\n , 'Hlt1MBMicroBiasVeloRateLimited' : 0\n , 'Hlt1MBMicroBiasTStationRateLimited' : 0 }\n , 'MaxNoBiasRate' : 1000000.\n }\n , Hlt2diphotonDiMuonLinesConf : { 'Prescale' : { 'Hlt2LowMultHadron' : 1.0 # for 0x0035, this is already done in L0\n , 'Hlt2LowMultPhoton' : 0.01\n } } \n\n #, Hlt2InclusiveDiProtonLinesConf: { 'Prescale' : { 'Hlt2DiProton' : 0.001\n # , 'Hlt2DiProtonLowMult' : 0.001\n # } } \n\n , Hlt2DisplVerticesLinesConf : { 'Prescale' : \n { 'Hlt2DisplVerticesHighMassSingle' : 1\n , 'Hlt2DisplVerticesSingle' : 1\n , 'Hlt2DisplVerticesDouble' : 1\n , 'Hlt2DisplVerticesHighMassSingle' : 1 \n , 'Hlt2DisplVerticesHighFDSingle' : 1\n , 'Hlt2DisplVerticesSinglePostScaled' : 1\n , 'Hlt2DisplVerticesSingleDown' : 1\n , 'Hlt2DisplVerticesDoublePostScaled' : 1\n , 'Hlt2DisplVerticesSingleHighMassPostScaled' : 1\n , 'Hlt2DisplVerticesSingleHighFDPostScaled' : 1\n , 'Hlt2DisplVerticesSingleMVPostScaled' : 1 } \n }\n\n }\n\n\n from Muons_April2012 import Muons_April2012\n __update_conf__(thresholds, Muons_April2012().Thresholds() )\n\n from Electrons_July2011 import Electrons_July2011\n __update_conf__(thresholds, Electrons_July2011().Thresholds() )\n\n from Hadrons_September2012 import Hadrons_September2012\n __update_conf__(thresholds, Hadrons_September2012().Thresholds() )\n\n from DV_draft2012 import DV_draft2012\n __update_conf__(thresholds, DV_draft2012().Thresholds() )\n\n from CharmLeptonic_draft2012 import CharmLeptonic_draft2012\n __update_conf__(thresholds, 
CharmLeptonic_draft2012().Thresholds() )\n\n return thresholds",
"def get_tpx_labels_abs():\n\t#labels_abs = [\"tpx_all_abs\", \"tpx_date_abs\", \"tpx_time_abs\", \"tpx_duration_abs\", \"tpx_set_abs\", \"tpx_date_none_abs\", \"tpx_date_year_abs\", \n\t#\"tpx_date_year_month_abs\", \"tpx_date_month_abs\", \"tpx_date_day_abs\", \"tpx_date_month_day_abs\", \"tpx_date_any_abs\", \"tpx_date_full_abs\",\n\t#\"tpx_date_past_ref_abs\", \"tpx_date_present_ref_abs\", \"tpx_date_future_ref_abs\",\n\t#\"tpx_date_any_chapter_first_abs\", \"tpx_date_any_chapter_other_abs\", \"tpx_date_any_chapter_other_mean_abs\"]\n\tlabels_abs = [\"tpx_all_abs\", \"tpx_time_abs\", \"tpx_duration_abs\", \"tpx_set_abs\", \"tpx_date_none_abs\", \"tpx_date_full_abs\",\n\t\"tpx_date_past_ref_abs\", \"tpx_date_present_ref_abs\", \"tpx_date_future_ref_abs\"]\n\treturn labels_abs",
"def get_labels_and_times(dict_):\n labels = np.asarray(list(dict_.items()))\n times = labels[:, 0].astype(int)\n labels = labels[:, 1]\n\n return labels, times",
"def SAMT_labels(self):\n \t\t#find basic labels\n \t\tlabels_basic = self.dependency_labels()\n \t\tlabels = Labels(labels_basic)\n \t\treturn labels.SAMT_labels()",
"def _compute_thresholds(self, thresholds):\r\n thr = thresholds\r\n limit = int(1 / thresholds)\r\n thresholds = [x * thr for x in range(limit)]\r\n thresholds.append(1)\r\n return thresholds",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def check_goCue_delays(data, **_):\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"goCueTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.0015) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed"
]
| [
"0.6164587",
"0.6123731",
"0.60513777",
"0.60192835",
"0.58672625",
"0.5849063",
"0.5819105",
"0.58170956",
"0.5804545",
"0.57796496",
"0.57647556",
"0.57647556",
"0.57647556",
"0.57298756",
"0.57082045",
"0.56462866",
"0.5634969",
"0.562641",
"0.561223",
"0.5570678",
"0.5561428",
"0.5560172",
"0.55532277",
"0.55418956",
"0.55182964",
"0.5517045",
"0.5508603",
"0.5506138",
"0.55021435",
"0.5501958"
]
| 0.6326991 | 0 |
Get the proportion of waiting times that fall within the target for a given trial of a threshold (a usage sketch follows this entry) | def get_target_proportions_of_current_trial(individuals, target):
ambulance_waits, ambulance_target_waits = 0, 0
other_waits, other_target_waits = 0, 0
for individual in individuals:
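# Records at node 2 are the ones counted: class 0 feeds the "other" counters, class 1 the ambulance counters (inferred from the branches below).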
ind_class = len(individual.data_records) - 1
rec = individual.data_records[-1]
if rec.node == 2 and ind_class == 0:
other_waits += 1
if rec.waiting_time < target:
other_target_waits += 1
elif rec.node == 2 and ind_class == 1:
ambulance_waits += 1
if rec.waiting_time < target:
ambulance_target_waits += 1
return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r",
"def precision_threshold(predictions, targets, threshold=0.7):\n number_of_examples_meeting_threshold = 0\n\n for pred, targ in zip(predictions, targets):\n total_positive_guesses = sum(pred)\n correct_positive_guesses = 0\n\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1:\n correct_positive_guesses += 1\n\n example_precision = correct_positive_guesses / total_positive_guesses\n if example_precision > threshold:\n number_of_examples_meeting_threshold += 1\n\n print(number_of_examples_meeting_threshold)\n examples_meeting_threshold_ratio = number_of_examples_meeting_threshold / len(predictions)\n print(examples_meeting_threshold_ratio)",
"def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))",
"def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials",
"def percent_waiting(self):\n return self._percent_waiting",
"def reward_min_waiting_time(state, *args):\n try:\n wait_times = state.feature_map(\n filter_by=('waiting_time',)\n )\n except AttributeError:\n wait_times = state\n\n ret = {}\n for tls_id, phase_obs in wait_times.items():\n ret[tls_id] = -sum([dly for obs in phase_obs for dly in obs])\n return ret",
"def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)",
"def estimate(values, target):\n\n # next time\n # diff(values)\n\n\n return 1.",
"def reward_threshold(self) -> Optional[float]:",
"def calc_stay_prob(rollouts):\n states = rollouts.states\n actions = rollouts.actions\n rewards = rollouts.rewards\n\n num_test_episodes = states.shape[0]\n num_trials = states.shape[1]\n count_trial_stayed = 0.01 + np.zeros((2, 2, num_test_episodes)) # [common/uncommon, reward/unrewarded]\n count_trial_all = 0.01 + np.zeros((2, 2, num_test_episodes))\n for epi in range(num_test_episodes):\n for t in range(0, num_trials-2, 2):\n uncommon_transition = int(actions[epi, t] != states[epi, t+1]-1)\n count_trial_all[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += 1\n count_trial_stayed[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += \\\n int(actions[epi, t+2] == actions[epi, t])\n return np.divide(count_trial_stayed, count_trial_all), count_trial_stayed, count_trial_all",
"def calculateWaitingTime(self, inputs):\n CollisionCounter.CollisionCounter.getInstance().waitingTimeCalculated(self.time)\n timeUntilDepature = self.getAtt('departure_time', inputs) - self.time\n remainingLoadingTime = self.calculateLoadingTime(inputs)\n # calculates first maximum possible waiting time\n sampleTime = int((timeUntilDepature - remainingLoadingTime) / self.participants)\n\n if sampleTime >= 1:\n # result is big enough for a standard treatment\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(sampleTime + 1)\n elif sampleTime < 1:\n # reslut is too small, special treatment necessary\n upperLimit = (10 * (1 - (math.exp(sampleTime - 1)))) + 1\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(max((min(upperLimit,\n timeUntilDepature)) + 1, 1))\n # decides whether charging is allowed during waiting time\n if not self.stayedConnected:\n self.stayConnected = True\n self.stayedConnected = True\n else:\n self.stayConnected = False\n self.stayedConnected = False",
"def num_trials(self):",
"def get_wait_time(*args, threshold: float = 0.9, rate_limit_header: str = \"X-Shopify-Shop-Api-Call-Limit\"):\n # average load based on threshold\n mid_load = threshold / 2\n # find the requests.Response inside args list\n for arg in args:\n response = arg if isinstance(arg, requests.models.Response) else None\n # Get the rate_limits from response\n rate_limits = response.headers.get(rate_limit_header) if response else None\n # define current load from rate_limits\n if rate_limits:\n current_rate, max_rate_limit = rate_limits.split(\"/\")\n load = int(current_rate) / int(max_rate_limit)\n else:\n load = None\n # define wait_time based on load conditions\n if not load:\n # when there is no rate_limits from header, use the `sleep_on_unknown_load`\n wait_time = ShopifyRateLimiter.on_unknown_load\n elif load >= threshold:\n wait_time = ShopifyRateLimiter.on_high_load\n elif load >= mid_load:\n wait_time = ShopifyRateLimiter.on_mid_load\n elif load < mid_load:\n wait_time = ShopifyRateLimiter.on_low_load\n return wait_time",
"def get_mean_waits_of_current_threshold(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n num_of_trials,\n runtime,\n target,\n):\n current_ambulance_proportions = []\n current_other_proportions = []\n current_combined_proportions = []\n\n if seed_num == None:\n seed_num = random.random()\n\n for trial in range(num_of_trials):\n individuals = simulate_model(\n lambda_2, lambda_1, mu, num_of_servers, threshold, seed_num + trial, runtime\n ).get_all_individuals()\n (\n ambulance_waits,\n ambulance_target_waits,\n other_waits,\n other_target_waits,\n ) = get_target_proportions_of_current_trial(individuals, target)\n\n current_ambulance_proportions.append(\n (ambulance_target_waits / ambulance_waits) if ambulance_waits != 0 else 1\n )\n current_other_proportions.append(\n (other_target_waits / other_waits) if other_waits != 0 else 1\n )\n current_combined_proportions.append(\n (ambulance_target_waits + other_target_waits)\n / (ambulance_waits + other_waits)\n if (ambulance_waits + other_waits) != 0\n else 1\n )\n\n return (\n np.nanmean(current_ambulance_proportions),\n np.nanmean(current_other_proportions),\n np.nanmean(current_combined_proportions),\n )",
"def expected_return(self, n_step):\r\n value = 0\r\n n_experiences = 50\r\n for i in range(n_experiences):\r\n trajectory = self.domain_exploration(n_step)\r\n value += self.compute_j(trajectory)\r\n return value/n_experiences",
"def validation_tpr_at_confidence(self, threshold):\r\n\r\n validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n return numpy.sum(validation_confidences >= threshold) / float(validation_confidences.shape[0])",
"def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj",
"def get_target_per_score(self):\n pass",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def pumpThreshold(self):\n EsatL, tauL, etaP = self.EsatL, self.tauL, self.etaP\n loss, DR = self.loss, self.DR\n return(EsatL / tauL * (loss + DR) / etaP)",
"def calc_sleep(n_new, n_fetched, new_per):\n if False:\n # So, my fancy way of doing this wasn't really better than the\n # simple way below.\n if not hasattr(calc_sleep, \"sleep_time_hours\"):\n calc_sleep.sleep_time_hours = 1.5 # Starting sleep time.\n # Sleep for some amount of time using `new_perecent`.\n # Target is approximately 70 %, since if 90 % we take 1/3rd the time,\n # if 50 % we take 5/3rds the time, and scale from 1/3 to 5/3 inbetween,\n # capping if not in between. The randdom part is just so the\n # resulting numbers are not even and we don't end up in a loop.\n # Liner scale:\n # f(x) = C * (1 - (x - A) / (B - A)) + D * (x - A) / (B - A)\n # Which is:\n # f(x) = x * (D - C) / (B - A) + C + A * (C - D) / (B - A)\n # So for (A, B) -> (C, D) = (50, 90) -> (5/3, 1/3)\n # f(x) = 3.33333333 - 0.03333333 * x\n new_per = 3.33333333 - 0.03333333 * new_per\n if new_per > 1.7:\n new_per = 1.7\n elif new_per < 0.3:\n new_per = 0.3\n # Plus something between -0.128 and 0.127.\n new_per = new_per + float(ord(os.urandom(1)) - 128) / 1000\n calc_sleep.sleep_time_hours = calc_sleep.sleep_time_hours * new_per\n return calc_sleep.sleep_time_hours\n else:\n # This is 5.8 + rand(-1.28, 1.27).\n max_sleep_time = 5.8 + float(ord(os.urandom(1)) - 128) / 100\n return float(n_fetched - n_new) * max_sleep_time / float(n_fetched)\n\n # If we get here, the big if didn't return anything.\n return 1.5 # Hours.",
"def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")",
"def calc_total_wait(self, current_time_step):\n self.total_wait = current_time_step - self.time_entered\n return self.total_wait",
"def check_response_stimFreeze_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"stimFreeze_times\"] - data[\"response_times\"], nan=np.inf)\n # Test for valid values\n passed = ((metric < 0.1) & (metric > 0)).astype(float)\n # Finally remove no_go trials (stimFreeze triggered differently in no_go trials)\n # These values are ignored in calculation of proportion passed\n passed[data[\"choice\"] == 0] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def estimate(self):\n self.p_est = sum([t.estimate() for t in self.tasks])\n return self.p_est",
"def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)",
"def calculate_timeout(self):\n return self.total_estimated_words() / self.minimum_wpm * 60",
"def _compute_sampling_threshold(global_step, k):\n return k / (k + math.exp(global_step / k))",
"def trackThresholdOptical(threshold, ants=0) :\n SPEED_OF_LIGHT = 299792458.0 # m/s\n antlist = helpers.makeList(ants)\n if antlist[0] == 0: antlist = currentAntennaNumbers()\n flo = lofreq()\n t = 0\n for ant in antlist:\n antmp = \"control.antenna%d\" %ant\n d = queryDouble(antmp + \".diameter\")\n t = queryDouble(antmp + \".trackTolerance\")\n tbw = threshold*math.pi/(180*60*60)*flo*1e9*d/SPEED_OF_LIGHT\n #print \"Threshold in bw:\", tbw\n trackThreshold(tbw, ant)\n return t",
"def evaluate(self, threshold=0.5):\n pass"
]
| [
"0.6077163",
"0.6067749",
"0.60207504",
"0.5824031",
"0.57592654",
"0.570491",
"0.5646278",
"0.5642472",
"0.56416166",
"0.5618799",
"0.5616272",
"0.5580728",
"0.5558966",
"0.55399644",
"0.552939",
"0.5522275",
"0.55195254",
"0.5507266",
"0.54815686",
"0.5480748",
"0.5465179",
"0.5464018",
"0.5458225",
"0.5456444",
"0.54469794",
"0.543743",
"0.54351455",
"0.54207325",
"0.5417883",
"0.54126585"
]
| 0.729661 | 0 |
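A minimal usage sketch for the entry above, assuming get_target_proportions_of_current_trial and simulate_model (shown in the next entry) are importable from the same module; every numeric argument below is an illustrative assumption, not a value taken from the source.

# Illustrative single-trial computation; all parameter values are assumed.
individuals = simulate_model(
    0.2,   # lambda_2: ambulance arrival rate (assumed)
    0.15,  # lambda_1: other-patient arrival rate (assumed)
    0.05,  # mu: service rate (assumed)
    8,     # num_of_servers (assumed)
    4,     # threshold (assumed)
    0,     # seed_num (assumed)
    1440,  # runtime (assumed)
).get_all_individuals()

amb, amb_within, other, other_within = get_target_proportions_of_current_trial(
    individuals, target=4  # target wait time (assumed)
)
prop_ambulance = amb_within / amb if amb != 0 else 1
prop_other = other_within / other if other != 0 else 1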
Calculates the mean proportion of waiting times that satisfy the target, over all trials, for the current threshold iteration. Returns float, float, float: the mean proportions of ambulance patients, other patients and all patients whose waits are within the target for a given threshold (a usage sketch follows this entry). | def get_mean_waits_of_current_threshold(
lambda_2,
lambda_1,
mu,
num_of_servers,
threshold,
seed_num,
num_of_trials,
runtime,
target,
):
current_ambulance_proportions = []
current_other_proportions = []
current_combined_proportions = []
if seed_num is None:
seed_num = random.random()
for trial in range(num_of_trials):
individuals = simulate_model(
lambda_2, lambda_1, mu, num_of_servers, threshold, seed_num + trial, runtime
).get_all_individuals()
(
ambulance_waits,
ambulance_target_waits,
other_waits,
other_target_waits,
) = get_target_proportions_of_current_trial(individuals, target)
current_ambulance_proportions.append(
(ambulance_target_waits / ambulance_waits) if ambulance_waits != 0 else 1
)
current_other_proportions.append(
(other_target_waits / other_waits) if other_waits != 0 else 1
)
current_combined_proportions.append(
(ambulance_target_waits + other_target_waits)
/ (ambulance_waits + other_waits)
if (ambulance_waits + other_waits) != 0
else 1
)
return (
np.nanmean(current_ambulance_proportions),
np.nanmean(current_other_proportions),
np.nanmean(current_combined_proportions),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_target_proportions_of_current_trial(individuals, target):\n ambulance_waits, ambulance_target_waits = 0, 0\n other_waits, other_target_waits = 0, 0\n for individual in individuals:\n ind_class = len(individual.data_records) - 1\n rec = individual.data_records[-1]\n if rec.node == 2 and ind_class == 0:\n other_waits += 1\n if rec.waiting_time < target:\n other_target_waits += 1\n elif rec.node == 2 and ind_class == 1:\n ambulance_waits += 1\n if rec.waiting_time < target:\n ambulance_target_waits += 1\n\n return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits",
"def pumpThreshold(self):\n EsatL, tauL, etaP = self.EsatL, self.tauL, self.etaP\n loss, DR = self.loss, self.DR\n return(EsatL / tauL * (loss + DR) / etaP)",
"def average_waiting(self):\n return self._average_waiting",
"def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))",
"def precision_threshold(predictions, targets, threshold=0.7):\n number_of_examples_meeting_threshold = 0\n\n for pred, targ in zip(predictions, targets):\n total_positive_guesses = sum(pred)\n correct_positive_guesses = 0\n\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1:\n correct_positive_guesses += 1\n\n example_precision = correct_positive_guesses / total_positive_guesses\n if example_precision > threshold:\n number_of_examples_meeting_threshold += 1\n\n print(number_of_examples_meeting_threshold)\n examples_meeting_threshold_ratio = number_of_examples_meeting_threshold / len(predictions)\n print(examples_meeting_threshold_ratio)",
"def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_work_time_avg(self)",
"def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.phasedarray_sptr_pc_work_time_avg(self)",
"def averageTime(self):\n \n pass",
"def pc_throughput_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_throughput_avg(self)",
"def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.beamformer_sptr_pc_work_time_avg(self)",
"def pc_throughput_avg(self) -> \"float\":\n return _beamforming_swig.phasedarray_sptr_pc_throughput_avg(self)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def pc_throughput_avg(self) -> \"float\":\n return _beamforming_swig.beamformer_sptr_pc_throughput_avg(self)",
"def mean_run_time(self) -> float:\n return float(self.result_array.sum(axis=0).mean())",
"def calc_squad_attack_success(self):\n attack_success_list = []\n active_units = self.get_active_units()\n if len(active_units) == 0:\n return 0\n for unit in active_units:\n a_s = unit.calc_attack_success()\n attack_success_list.append(a_s)\n return geometric_mean(attack_success_list)",
"def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)",
"def _tstat_all(self):\n return np.squeeze(self.solution) / self._se_all",
"def cal_average_kill_turns(deck):\n #Results array\n turn_results = np.zeros(NUM_SIMS)\n \n #Simulation loop\n for i in range(NUM_SIMS): \n if VERBOSE:\n print('Running simulation ' + str(i + 1)) \n turn_results[i] = cal_kill_turn(copy.deepcopy(deck))\n #End of Simulations\n \n #DETERMINE ATK\n average_kill_turn = np.average(turn_results)\n min_kill_turn = np.min(turn_results)\n max_kill_turn = np.max(turn_results)\n \n return average_kill_turn, min_kill_turn, max_kill_turn",
"def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)",
"def calculateAverage(self, data):\n\n nValidTrials = data['nValid'][-1]\n nRewardTrials = data['nRewarded'][-1]\n return float(nRewardTrials)/nValidTrials",
"def get_average_complete_progress(self):\n cnt_events = len(self.get_events())\n if cnt_events > 0:\n return sum([self.get_average_progress(event) for event in self.get_events()]) * 1.0 / cnt_events\n return 0.0",
"def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()",
"def get_average_repro(self):\n return np.mean([agent.get_fledge_probability() for agent in self.agents])",
"def _compute_population_estimate(cls, certificates):\n assert isinstance(certificates, list)\n assert len(certificates) >= cls.certificate_sample_length\n\n sum_means = 0\n sum_waits = 0\n for certificate in certificates[:cls.certificate_sample_length]:\n sum_waits += certificate.duration - cls.minimum_wait_time\n sum_means += certificate.local_mean\n\n avg_wait = sum_waits / len(certificates)\n avg_mean = sum_means / len(certificates)\n\n return avg_mean / avg_wait",
"def avgtr(self):\n return np.diff(self.trtimes).mean()",
"def average_precision(tp,fp,npos):\r\n \r\n fp = np.cumsum(fp)\r\n tp = np.cumsum(tp)\r\n rec = tp / float(npos)\r\n # avoid divide by zero in case the first detection matches a difficult\r\n # ground truth\r\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\r\n \r\n # compute VOC AP using 11 point metric\r\n ap = 0.0\r\n for t in np.arange(0.0, 1.1, 0.1):\r\n if np.sum(rec >= t) == 0:\r\n p = 0\r\n else:\r\n p = np.max(prec[rec >= t])\r\n ap = ap + p / 11.0\r\n\r\n return ap"
]
| [
"0.6751316",
"0.59683293",
"0.59454405",
"0.59434617",
"0.5935533",
"0.593354",
"0.59295374",
"0.5865449",
"0.58428824",
"0.5830085",
"0.58298254",
"0.57752395",
"0.5756599",
"0.57528955",
"0.57528955",
"0.57528955",
"0.5750205",
"0.5748908",
"0.573746",
"0.57284045",
"0.5714656",
"0.56852627",
"0.56763947",
"0.5669382",
"0.56470615",
"0.56416285",
"0.5640653",
"0.5637936",
"0.56368446",
"0.56285685"
]
| 0.6575508 | 1 |
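A minimal call sketch for the entry above; the argument values are illustrative assumptions only.

# Illustrative call for a single threshold; every number here is assumed.
mean_amb, mean_other, mean_all = get_mean_waits_of_current_threshold(
    lambda_2=0.2,
    lambda_1=0.15,
    mu=0.05,
    num_of_servers=8,
    threshold=4,
    seed_num=0,
    num_of_trials=10,
    runtime=1440,
    target=4,
)
print(f"Within target -> ambulance: {mean_amb:.2%}, other: {mean_other:.2%}, all: {mean_all:.2%}")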
Builds a plot that shows the proportion of individuals who satisfy the desired waiting time target. The plot shows the proportion of ambulance patients, the proportion of other patients, and the combined proportion of the two that satisfy the target. | def make_plot_for_proportion_within_target(
lambda_2,
lambda_1,
mu,
num_of_servers,
num_of_trials,
seed_num,
target,
runtime=1440,
max_threshold=None,
):
ambulance_proportions = []
other_proportions = []
all_proportions = []
if max_threshold is None:
max_threshold = num_of_servers
for threshold in range(max_threshold + 1):
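# For each capacity threshold, store the trial-averaged proportions of ambulance, other and combined patients whose waits fall within the target.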
mean_ambulance, mean_other, mean_combined = get_mean_waits_of_current_threshold(
lambda_2,
lambda_1,
mu,
num_of_servers,
threshold,
seed_num,
num_of_trials,
runtime,
target,
)
ambulance_proportions.append(mean_ambulance)
other_proportions.append(mean_other)
all_proportions.append(mean_combined)
plt.figure(figsize=(23, 10))
proportion_plot = plt.plot(
ambulance_proportions, ":", other_proportions, ":", all_proportions, "-"
)
plt.title(
"Proportion of individuals within target for different capacity thresholds"
)
plt.xlabel("Capacity Threshold")
plt.ylabel("Proportion of Individuals within target")
plt.legend(
["Ambulance Patients", "Other Patient", "All Patients"], fontsize="x-large"
)
return proportion_plot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comp_time_plot(p1=database['K+'], p2=database['pi+'], pmax=80, plot=True):\r\n dt = []\r\n p_range = np.linspace(10, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in p_range:\r\n t1_per_m = 76.273/(beta(p, m1)*gamma(p, m1)*c)\r\n t2_per_m = 76.273/(beta(p, m2)*gamma(p, m2)*c)\r\n dt.append(abs(t1_per_m - t2_per_m)*1e12)\r\n dt_12_5 = dt[np.argmin(abs(p_range-12.5))]\r\n dt_75 = dt[np.argmin(abs(p_range-75))]\r\n ratio = dt_12_5/dt_75\r\n if plot==True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(p_range, dt, 'b', label=r'$\\Delta t$')\r\n ax.axvline(12.5, color='r', label='p=12.5 GeV')\r\n ax.axvline(75, color='g', label='p=75 GeV')\r\n ax.set_xlim(10, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n# ax.set_yscale('log')\r\n ax.set_ylabel(r'$\\Delta t$ / ps', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta t$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.legend(fontsize=20)\r\n text = 'dt(12.5) = {0:.2f} ps, '.format(dt_12_5)\r\n text += 'dt(75) = {0:.2f} ps, '.format(dt_75)\r\n text += 'ratio = {0:.3f}'.format(ratio)\r\n plt.show()\r\n print(text)\r\n return [dt_12_5, dt_75, ratio]",
"def simulationDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(delay):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(delay, delay+150):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n\n # pylab.plot(avgPopSize, label = 'avg pop size')\n # pylab.plot(avgGuttagonolResistantPop, label = 'avg pop size guttagonol-resistant')\n # pylab.xlabel(\"Time\")\n # pylab.ylabel(\"Average Population Size\")\n # pylab.title(\"Average Size of the Virus Populations\")\n # pylab.legend(loc = 'best')\n # pylab.show()",
"def get_target_proportions_of_current_trial(individuals, target):\n ambulance_waits, ambulance_target_waits = 0, 0\n other_waits, other_target_waits = 0, 0\n for individual in individuals:\n ind_class = len(individual.data_records) - 1\n rec = individual.data_records[-1]\n if rec.node == 2 and ind_class == 0:\n other_waits += 1\n if rec.waiting_time < target:\n other_target_waits += 1\n elif rec.node == 2 and ind_class == 1:\n ambulance_waits += 1\n if rec.waiting_time < target:\n ambulance_target_waits += 1\n\n return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits",
"def plot_activities(df, **kwargs):\n if \"cmap\" not in kwargs:\n cmap = build_cmap(df)\n else:\n cmap=kwargs[\"cmap\"]\n\n df['color'] = df['act'].map(cmap)\n pids = df['pid'].unique()\n\n fig, axs = plt.subplots(\n len(pids), 1,\n figsize=(\n 16,\n 3 + (1 * (len(pids)-1)) # fudge to keep proportions about right\n ),\n sharex=True\n )\n\n for idx, pid in enumerate(pids):\n person_data = df.loc[df.pid == pid]\n label_x, label_y, labels = [], [], []\n\n if len(pids) == 1:\n ax = axs\n else:\n ax = axs[idx]\n\n for i in range(len(person_data)):\n y = 1\n data = person_data.iloc[i]\n ax.barh(y, \n width='dur', \n data=data,\n left='start_time',\n label='act',\n color='color',\n edgecolor='black',\n linewidth=2\n )\n \n #Populate Labelling Params\n label_x.append(data['start_time'] + data['dur'] / 2)\n label_y.append(y)\n labels.append(data.act)\n \n # Labels\n rects = ax.patches\n for x, y, rect, label in zip(label_x, label_y, rects, labels):\n if label == 'Travel':\n color = 'white'\n else:\n color = 'black'\n\n if rect.get_width() >= 2:\n ax.text(x, y, label, ha='center', va='center',\n fontdict={\n 'color':color, 'size':10, 'weight':'regular'\n }\n )\n continue\n if rect.get_width() >= .5:\n ax.text(x, y, label, ha='center', va='center',\n fontdict={\n 'color':color, 'size':10, 'weight':'regular', 'rotation':90\n }\n )\n\n ax.set_title(f\"Person ID: {pid}\")\n ax.get_yaxis().set_visible(False)\n for side in ['top', 'right', 'bottom', 'left']:\n ax.spines[side].set_visible(False)\n\n legend_elements = []\n for act, color in cmap.items():\n legend_elements.append(\n Patch(facecolor=color, edgecolor='black', label=act)\n )\n \n plt.xticks(range(25))\n plt.xlim(right=24)\n plt.legend(\n handles=legend_elements, ncol=len(legend_elements),\n prop={'size':12}, frameon=False,\n bbox_to_anchor=(.5, -.5), loc='upper center', borderaxespad=0.)\n plt.tight_layout()\n\n if kwargs.get(\"path\") is not None:\n plt.savefig(kwargs[\"path\"])",
"def waiting_times(all_data):\n print('Computing waiting times')\n result = {'p': [], 'alpha': [], 'durations': []}\n for data in all_data:\n N = data['config']['N']\n p = data['config']['p']\n alpha = data['config']['alpha']\n print(f'p = {p}, alpha = {alpha}')\n\n # find dominant strategy at each point in time\n print(' > Finding dominant strategies')\n dom_strats = np.asarray(list(map(lambda e: get_dominant_strategy(e), data['snapshots'])))\n print(f' >> Found {np.unique(dom_strats).size} unique strategies')\n\n if np.unique(dom_strats).size <= 1:\n print(' >> Skipping')\n continue\n\n # detect dominant strategy changes (and durations)\n print(' > Computing durations')\n durations = get_domain_durations(dom_strats)\n durations /= N**2\n print(f' >> Found {durations.size} durations')\n\n # store result\n result['p'].extend([p]*len(durations))\n result['alpha'].extend([alpha]*len(durations))\n result['durations'].extend(durations)\n\n df = pd.DataFrame(result)\n\n # plot w-time distributions\n print(' > Plotting')\n for p in df['p'].unique():\n sub = df[df['p']==p]\n\n plt.figure()\n for alpha, group in sub.groupby(['alpha']):\n sns.distplot(\n group['durations'],\n kde=False, label=rf'$\\alpha={alpha}$')\n\n plt.title(rf'Distribution of waiting times ($p={p}$)')\n plt.xlabel(r'$\\Delta t$')\n plt.ylabel(r'count')\n plt.legend(loc='best')\n\n plt.savefig(f'images/waiting_times_p{p}.pdf')\n\n ## plot wtd dependence on parameters\n plt.figure()\n sns.boxplot(x='alpha', y='durations', hue='p', data=df)\n plt.savefig('images/waiting_times_vs_alpha.pdf')\n plt.close()\n\n return df",
"def build_preferred_foot_and_potential_distribution(player_attributes):\n fig, ax = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(20, 10)\n\n sns.countplot(x=player_attributes[\"preferred_foot\"], ax=ax[0])\n sns.countplot(x=player_attributes[\"potential\"], ax=ax[1])\n\n pl.xticks(rotation=270)\n fig.tight_layout()\n plt.show()",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(150):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(150, 150+delay):\n patient.update()\n patient.addPrescription('grimpex')\n for k in range(150+delay, 300+delay):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n return x_plot",
"def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)",
"def output_tasks_launched_versus_time(self, output_directory):\r\n gnuplot_file = open(\"%s/task_launches_vs_time.gp\" % output_directory, \"w\")\r\n gnuplot_file.write(\"set terminal postscript color 'Helvetica' 12\\n\")\r\n gnuplot_file.write(\"set output '%s/task_launches_vs_time.ps'\\n\" % output_directory)\r\n gnuplot_file.write(\"set xlabel 'Time (ms)'\\n\")\r\n gnuplot_file.write(\"set ylabel 'Tasks Launched'\\n\")\r\n gnuplot_file.write(\"plot \")\r\n\r\n job_count = 0\r\n for id, request in self.__requests.items():\r\n results_filename = \"%s/%s_tasks_launched_vs_time\" % (output_directory, id)\r\n file = open(results_filename, \"w\")\r\n arrival_time, reservation_replies = request.get_scheduler_get_task_times()\r\n reservation_count = 0\r\n file.write(\"0\\t0\\n\")\r\n for reservation in reservation_replies:\r\n reservation_count += 1\r\n # Write the elapsed time since the request arrived.\r\n file.write(\"%s\\t%s\\n\" % (reservation - arrival_time, reservation_count))\r\n file.close()\r\n\r\n if job_count != 0:\r\n gnuplot_file.write(\",\\\\\\n\")\r\n gnuplot_file.write(\"'%s' using 1:2 lw 1 with lp\" % results_filename)\r\n job_count += 1\r\n if job_count >= 20:\r\n break\r\n gnuplot_file.close()",
"def updatefig(*args):\n p1.set_array(turn(grid))\n p2.set_data(tally['time'], tally['sickos'])\n p3.set_data(tally['time'], tally['immune'])\n p4.set_data(tally['time'], tally['dead'])\n ax2.set_xlim(0, max(tally['time']))\n # ax2.set_ylim(0, max(max(sickos), max(immune)))\n # End sim if the disease is gone\n if tally['sickos'][-1] == 0:\n ani.event_source.stop()\n end_time = time.process_time()\n show_summary()\n print(\"Process time:\", end_time - start_time)\n return p1, p2, p3, p4,",
"def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)",
"def simulationDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n\n delays = [300, 150, 75, 0]\n results = []\n\n for delay in delays:\n for i in range(numTrials):\n virusList = []\n virusPop = 0\n for n in range(numViruses):\n virusList.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n my_patient = TreatedPatient(virusList, maxPop)\n\n for step in range(delay + 150):\n if step == delay:\n my_patient.addPrescription('guttagonol')\n virusPop = my_patient.update()\n results.append(virusPop)\n\n toPlot = []\n for i in range(0, len(results), numTrials):\n toPlot.append(results[i:i + numTrials])\n # print toPlot\n\n for i, _ in enumerate(delays):\n pylab.subplot(2, 2, i + 1)\n pylab.hist(toPlot[i])\n pylab.show()",
"def ratio(gb_data, data_depcode, data_ratio_hospitalises,current_date, data_hospitalises, current_date_file, min_value_80p , nbhospitalises_80p) :\n start = time.time()\n fig, ax = plt.subplots(figsize=(12, 8))\n\n plt.title(f\"Ratio of in-hospital deaths to hospitalizations : {current_date}\", fontsize=20)\n plt.ylabel(\"Total number of deceases / Total number of hospitalized\")\n plt.xlabel(\"Total number of hospitalized\")\n\n for i, txt in enumerate(data_depcode):\n if (data_hospitalises[i] > data_hospitalises.max() * 0.20):\n ax.annotate(txt, (data_hospitalises[i], data_ratio_hospitalises[i]), xytext=(data_hospitalises[i] + 20, data_ratio_hospitalises[i])) \n\n plt.axhline(data_ratio_hospitalises.mean(), color='green', linestyle='--', label=f'average death ratio ({data_ratio_hospitalises.mean():.2f}%)')\n\n plt.axvline(min_value_80p, color='pink', linestyle='-', label=f\"80% of the number of hospitalized people in France are on the right side of the line ({nbhospitalises_80p:.0f} hospitalized)\")\n\n ax.scatter(data_hospitalises, data_ratio_hospitalises)\n\n ax.annotate('updated chart',xy=(1, 0), xytext=(-15, 10), fontsize=15,\n xycoords='axes fraction', textcoords = 'offset points',\n bbox=dict(facecolor = 'white', alpha = 0.9),\n horizontalalignment = 'right', verticalalignment = 'bottom')\n\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))\n plt.legend()\n\n current_date_file = gb_data['date'].max().strftime('%Y%m%d')\n end = time.time()\n print(\"Time spent on ratio plot: {0:.5f} s.\".format(end - start)) \n plt.show()",
"def plot_graphy_resilience_targeted():\n \n global counter\n counter += 1\n random_graph = make_random_undirected_graph(1239, 0.004)\n attack_order = fast_targeted_order(random_graph)\n random_resilience = compute_resilience(random_graph, attack_order)\n plt.plot(range(len(random_resilience)), random_resilience, '-b', label= 'random, p =0.004')\n \n synthetic_undirected_graph = make_synthetic_undirected_graph(1239, 5)\n attack_order = fast_targeted_order(synthetic_undirected_graph)\n synthetic_resilience = compute_resilience(synthetic_undirected_graph, attack_order)\n plt.plot(range(len(synthetic_resilience)), synthetic_resilience, '-r', label = 'UPA, m = 5')\n\n network_graph = load_graph(NETWORK_URL)\n attack_order = fast_targeted_order(network_graph)\n network_resilience = compute_resilience(network_graph, attack_order)\n plt.plot(range(len(network_resilience)), network_resilience, '-g', label = 'Network')\n \n plt.legend(loc='upper right')\n \n plt.title(\" plot of graph resilience\")\n plt.xlabel(\"number of nodes removed\")\n plt.ylabel(\"the size of the largest connect component \")\n plt.savefig(\"graph_resilience_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping",
"def simulationDelayedTreatment():\n\n delays = [300, 150, 75, 0]\n colors = [\"r\", \"b\", \"g\", \"y\"]\n nbrSim = 500\n \n patient = Patient(getViruses(100, 0.1, 0.05, {\"guttagonol\":False}, 0.05), 1000)\n \n for iter in range(len(delays)):\n print \"Simulation with \", delays[iter], \"delay\"\n totalPopulation = []\n for sim in range(0, nbrSim):\n for i in range (0, delays[iter]):\n patient.update()\n\n patient.addPrescription(\"guttagonol\")\n \n for i in range (0, 150):\n patient.update()\n \n totalPopulation.append(patient.update())\n \n histPopulation(totalPopulation, \"Delay: \" + str(delays[iter]), colors[iter])\n\n pylab.legend()\n pylab.show()",
"def renderSimulation(self, target, activePatrollers, attackTime, targetPosition):\n\n\t\tt = self.no_of_discrete_time_intervals\n\t\tfSchedule = self.fSchedule\n\t\tpSchedule = self.pSchedule\n\t\tshow_legend = self.show_legend\n\n\t\txaxis = np.array([1.0*(x)/(t-1) for x in range(t)])\n\t\tattack_ferry = target[0]\n\t\tlegendArr = []\n\n\t\t# Attack on target plot\n\t\tattackPosition = game_utility.getNormalizedPosition(target[0], fSchedule, attackTime, target[1])\n\t\tplt.plot([target[1]], [attackPosition], 'ro')\n\t\tlegendArr.append(\"Attack\")\n\n\t\t# Attacked ferry plot\n\t\tplt.plot(xaxis, fSchedule[attack_ferry], '--')\n\t\tlegendArr.append(\"Ferry\" + format(attack_ferry))\n\n\t\t#Other ferry plots\n\t\tfor f in range(len(fSchedule)):\n\t\t\tif(f != attack_ferry):\n\t\t\t\tplt.plot(xaxis, fSchedule[f], '--')\n\t\t\t\tlegendArr.append(\"Ferry\" + format(f))\n\n\t\t#Patroller plots\n\t\tfor p in range(len(pSchedule)):\n\t\t\tplt.plot(xaxis, pSchedule[p], '--')\n\t\t\tlegendArr.append(\"Patroller\" + format(p))\n\n\t\t# Active Patroller\n\t\tfor activeP in range(len(activePatrollers)):\n\t\t\tattackPosition = game_utility.getNormalizedPosition(activeP, pSchedule, attackTime, target[1])\n\t\t\tplt.plot([target[1]], [attackPosition], 'gs')\n\t\t\tlegendArr.append(\"Active patrollers\")\n\n\t\tif(show_legend):\n\t\t\tplt.legend(legendArr, loc=\"upper right\")\n\n\t\tplt.axis([0, 1, 0, 1])\n\t\t#plt.xticks(np.arange(min(xaxis), max(xaxis), 0.04)) # adjust number of ticks\n\t\t#plt.grid()\n\t\tplt.xlabel('Time')\n\t\tplt.ylabel('Distance')\n\t\tplt.show()",
"def plot_running_time():\n global counter\n counter += 1\n running_time_targeted = []\n running_time_fast_targeted = []\n \n for node_number in range(10, 1000, 10):\n synthetic_undirected_graph = make_synthetic_undirected_graph(node_number, 5)\n\n start_time = time.time()\n attack_order = targeted_order(synthetic_undirected_graph)\n stop_time = time.time()\n running_time_targeted.append(stop_time - start_time)\n \n start_time = time.time()\n attack_order = fast_targeted_order(synthetic_undirected_graph)\n stop_time = time.time()\n running_time_fast_targeted.append(stop_time - start_time)\n \n plt.plot(range(10, 1000, 10), running_time_targeted, '-b', label = 'targeted_order')\n plt.plot(range(10, 1000, 10), running_time_fast_targeted, '-r', label = 'fast_targeted_order')\n \n plt.legend(loc='upper right')\n\n\n plt.title(\" plot of running time of desktop Python\")\n plt.xlabel(\"the number of nodes\")\n plt.ylabel(\"running times\")\n plt.savefig(\"running_time_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping",
"def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()",
"def punto2_2():\r\n\tprint(\"2.2R/\")\r\n\tp_small = [0.005*i for i in range(20)]\r\n\tp_large =[0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1]\r\n\tp_large= p_small + p_large\t\r\n\ts = multStats2(p_small,100,500)\r\n\ty1,y2,y3,y4,y5= s[0],s[1],s[2],s[3],s[4]\r\n\tplt.plot(p_small,y1, label = \"Avrg num of ArtP\")\r\n\tplt.plot(p_small,y2, label = \"Avrg num of Brid\")\r\n\tplt.plot(p_small,y3, label = \"Avrg size per CC\")\r\n\tplt.plot(p_small,y4, label = \"Avrg node Deg\")\r\n\t#plt.plot(p_small,y5)\r\n\tplt.xlabel(\"P_Small\")\r\n\tplt.ylabel(\"Average Values\")\r\n\tplt.title(\"Random Graph Metrics\")\r\n\tplt.legend()\r\n\tplt.show()\r\n\ts = multStats3(p_large,20,100)\r\n\ty2,y3,y4 = s[0],s[1],s[2]\r\n\ty1 = multStats4(p_large,100,500)\r\n\tplt.plot(p_large,y1, label = \"Avrg num of CC\")\r\n\tplt.plot(p_large,y2, label = \"Avrg num of triangles\")\r\n\tplt.plot(p_large,y3, label = \"Avrg num of triplets\")\r\n\tplt.plot(p_large,y4, label = \"#triangles/#triplets\")\r\n\tplt.xlabel(\"P_Small & P_Large\")\r\n\tplt.ylabel(\"Average Values\")\r\n\tplt.title(\"Random Graph Metrics\")\r\n\tplt.legend()\r\n\tplt.show()\r\n\treturn",
"def plot_pupil_diameter_hist(pupil_diameter, cam_times, trials_df, cam='left'):\r\n for align_to, color in zip(['stimOn_times', 'feedback_times'], ['red', 'purple']):\r\n start_window, end_window = plt_window(trials_df[align_to])\r\n start_idx = insert_idx(cam_times, start_window)\r\n end_idx = np.array(start_idx + int(WINDOW_LEN * SAMPLING[cam]), dtype='int64')\r\n # Per trial norm\r\n pupil_all = [zscore(list(pupil_diameter[start_idx[i]:end_idx[i]])) for i in range(len(start_idx))]\r\n pupil_all_norm = [trial - trial[0] for trial in pupil_all]\r\n\r\n pupil_mean = np.nanmean(pupil_all_norm, axis=0)\r\n pupil_std = np.nanstd(pupil_all_norm, axis=0) / np.sqrt(len(pupil_all_norm))\r\n times = np.arange(len(pupil_all_norm[0])) / SAMPLING[cam] + WINDOW_LAG\r\n\r\n plt.plot(times, pupil_mean, label=align_to.split(\"_\")[0], color=color)\r\n plt.fill_between(times, pupil_mean + pupil_std, pupil_mean - pupil_std, color=color, alpha=0.5)\r\n plt.axvline(x=0, linestyle='--', c='k')\r\n plt.title(f'Pupil diameter trial avg\\n({cam.upper()} cam)')\r\n plt.xlabel('time [sec]')\r\n plt.xticks([-0.5, 0, 0.5, 1, 1.5])\r\n plt.ylabel('z-scored smoothed pupil diameter [px]')\r\n plt.legend(loc='lower right', title='aligned to')",
"def decay_proportion_plot(Lmax=1000, p1=database['K+'], p=75, target_rate=53957518.001):\r\n L_range = np.linspace(0, 1000, 10000)\r\n prop = []\r\n for L in L_range:\r\n prop.append(decay_proportion(L, p1, p, target_rate))\r\n# charac_L = p*c*(p1.tau*1e-3/c)/p1.mass\r\n fig = plt.figure(figsize=[12, 3])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(L_range, prop, 'r', lw=2)\r\n ax.set_xlim(0, Lmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('Target Distance', fontsize=20)\r\n ax.set_ylabel(r'$K^+$ flux', fontsize=20)\r\n# ax.xaxis.set_major_locator(plt.MultipleLocator(charac_L/4))\r\n# ax.xaxis.set_minor_locator(plt.MultipleLocator(charac_L/20))\r\n# ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter_non_int(1, charac_L, 'L_{K^+}')))\r\n ax.set_xticks([0])\r\n ax.set_yticks([target_rate])\r\n ax.yaxis.set_major_locator(plt.MultipleLocator(target_rate/1))\r\n ax.yaxis.set_minor_locator(plt.MultipleLocator(target_rate/1))\r\n ax.yaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter_non_int(1, target_rate, 'R_t')))\r\n ax.legend(fontsize=20)\r\n ax.minorticks_off()\r\n# ax.grid()\r\n plt.show()\r\n return",
"def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"",
"def make_plot_two_hospitals_arrival_split(\n lambda_2,\n lambda_1_1,\n lambda_1_2,\n mu_1,\n mu_2,\n num_of_servers_1,\n num_of_servers_2,\n threshold_1,\n threshold_2,\n measurement_type=\"b\",\n seed_num_1=None,\n seed_num_2=None,\n warm_up_time=100,\n trials=1,\n accuracy=10,\n runtime=1440,\n):\n hospital_times_1 = []\n hospital_times_2 = []\n all_arrival_rates = np.linspace(0, lambda_2, accuracy + 1)\n for arrival_rate_1 in all_arrival_rates[1:-1]:\n arrival_rate_2 = lambda_2 - arrival_rate_1\n times_1 = get_multiple_runs_results(\n arrival_rate_1,\n lambda_1_1,\n mu_1,\n num_of_servers_1,\n threshold_1,\n seed_num_1,\n warm_up_time,\n trials,\n runtime,\n )\n times_2 = get_multiple_runs_results(\n arrival_rate_2,\n lambda_1_2,\n mu_2,\n num_of_servers_2,\n threshold_2,\n seed_num_2,\n warm_up_time,\n trials,\n runtime,\n )\n hospital_times_1, hospital_times_2 = update_hospitals_lists(\n hospital_times_1, hospital_times_2, times_1, times_2, measurement_type\n )\n\n x_axis_label, y_axis_label, title = get_two_hospital_plot_labels(measurement_type)\n x_labels = all_arrival_rates[1:-1] / all_arrival_rates[-1]\n plt.figure(figsize=(23, 10))\n waiting_time_plot = plt.plot(x_labels, hospital_times_1, ls=\"solid\", lw=1.5)\n plt.plot(x_labels, hospital_times_2, ls=\"solid\", lw=1.5)\n plt.legend([\"Hospital 1\", \"Hospital 2\"], fontsize=\"x-large\")\n plt.title(title, fontsize=18)\n plt.xlabel(x_axis_label, fontsize=15, fontweight=\"bold\")\n plt.ylabel(y_axis_label, fontsize=15, fontweight=\"bold\")\n\n return waiting_time_plot",
"def exercise_1(self):\n gdp = self.gdp\n phones = self.phones \n percent_literate = self.percent_literate\n # print(len(gdp), len(phones),len(percent_literate))\n print(type(self.percent_literate[1]))\n print((percent_literate[1]))\n\n # Create scatter plot with GDP on the x-axis and number of phones on the y-axis\n sns.scatterplot(x = gdp, y = phones)\n plt.show()\n\n # Change this scatter plot to have percent literate on the y-axis\n # sns.scatterplot(x=gdp, y=percent_literate) \n # plt.show()",
"def setup_plot(self):\n\n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n \n # Current healthcare status\n self.healthcare_status = \"Normal\"\n \n # Scatter plots to plot people\n self.scat = self.ax.scatter(healthy_x,\n healthy_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"lightsteelblue\", s=10)\n self.scat2 = self.ax.scatter(infected_x,\n infected_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indianred\", s=10)\n self.scat3 = self.ax.scatter(immune_x,\n immune_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"mediumseagreen\", s=10)\n self.scat4 = self.ax.scatter(dead_x,\n dead_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indigo\", s=10)\n # Lists for line graph\n self.infected = []\n self.infected_total = []\n self.deaths = []\n self.frames = []\n self.immunes = []\n self.infected.append(len(infected_x))\n self.deaths.append(len(dead_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.immunes.append(len(immune_x))\n self.frames.append(0)\n\n # Line graph plotting number\n self.total_infected, = self.ax1.plot(self.frames, self.infected_total)\n self.currently_infected, = self.ax1.plot(self.frames, self.infected, c=\"indianred\", label='Currently Infected')\n self.total_deaths, = self.ax1.plot(self.frames, self.deaths, c=\"indigo\", label='Total Dead')\n self.total_immune, = self.ax1.plot(self.frames, self.immunes, c=\"mediumseagreen\", label='Total Immune')\n\n # Code below prints statistics \n if(self.putil.enforce_social_distance_at > 0):\n self.ax1.plot([self.putil.enforce_social_distance_at]*2, [0,self.putil.size],c=\"gold\", label=\"Social Distancing\")\n self.social_distancing_info = (\"At frame \" + str(self.putil.enforce_social_distance_at))\n self.social_distancing_num = str(int(self.putil.social_distance_per * self.putil.size)) + \" or \" + str(self.putil.social_distance_per*100)+\"%\"\n else:\n self.social_distancing_info = (\"Disabled\")\n self.social_distancing_num = \"0 or 0%\"\n\n if(self.putil.enforce_mask_wearing_at > 0):\n self.ax1.plot([self.putil.enforce_mask_wearing_at]*2, [0,self.putil.size],c=\"hotpink\", label=\"Mask Mandate\")\n self.mask_wearing_info = \"At frame \" + str(self.putil.enforce_mask_wearing_at) \n else:\n self.mask_wearing_info = \"Disabled\"\n\n self.ax1.tick_params(axis=\"y\",direction=\"in\", pad=3)\n self.ax1.plot([0,1000],[self.putil.virus.total_healthcare_capacity]*2, c=\"silver\")\n self.ax1.get_xaxis().set_visible(False)\n self.ax1.legend(prop={'size': 8},loc='upper right')\n self.ax2.text(0,1,\"Statistics\", fontsize='large' , fontweight='bold')\n self.ax2.text(0,-0.5, \"Frame:\\nCurrently Infected:\\nHealthy People:\\nImmune People:\\nTotal Deaths:\\nHealthcare Conditions:\")\n self.ax2.text(0.54,-0.5, \"Population:\\nMasks Wearing:\\nSocial Distancing:\\nPeople Distancing:\\nTotal Infected:\\n\")\n self.ax.text(0,1.06, 
\"Simulation\", fontsize='xx-large' , fontweight='bold')\n self.text = self.ax2.text(0.33, -0.5, \"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" %(0,len(infected_x),str(len(healthy_x)) + \" or 0%\", str(len(immune_x)) + \" or 0%\",str(len(dead_x)) + \" or 0%\",self.healthcare_status))\n self.text2 = self.ax2.text(0.81,-0.5,\"%d \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n\n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected, self.total_infected,",
"def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def plot_stay_prob(ax, stay_prob):\n mean_stay_prob = np.mean(stay_prob, 2)\n std_stay_prob = sem(stay_prob, 2)\n # set width of bar\n bar_width = 0.25\n\n # Set position of bar on X axis\n r1 = np.arange(2)\n r2 = [x + bar_width for x in r1]\n\n ax.bar(r1, mean_stay_prob[0, :], yerr=std_stay_prob[0, :],\n color='b', width=bar_width, edgecolor='white', label='Common')\n ax.bar(r2, mean_stay_prob[1, :], yerr=std_stay_prob[1, :],\n color='r', width=bar_width, edgecolor='white', label='Uncommon')\n\n # Add xticks on the middle of the group bars\n ax.set_xticks([r + bar_width/2 for r in range(2)])\n ax.set_xticklabels(['Rewarded', 'Unrewarded'])\n ax.set_ylabel('Stay Probability')\n ax.set_title('A2C-LSTM Model')\n ax.set_ylim(0, 1)\n ax.legend()",
"def boot_induvidual_plot(self): # Setting up induvidual plots\n self.plot_traits = list([self.plt_0.subplot2grid((2, 5), (0, 0)), self.plt_0.subplot2grid((2, 5), (0, 1)),\n self.plt_0.subplot2grid((2, 5), (0, 2)), self.plt_0.subplot2grid((2, 5), (0, 3)),\n self.plt_0.subplot2grid((2, 5), (0, 4))])\n\n # creatng list of plot objects\n\n for x in range(len(self.X_transp)): # Iterating over each attributes patient\n\n present=self.plot_traits[x]\n # Selecting a particular plot object\n present.set_facecolor('orange')\n # setting face color\n present.scatter(self.np_0.arange(len(self.list_patient_names)),self.X_transp[x],c='blue')\n # drawing a scatter plot of this attribute\n\n present.xaxis.set_major_locator(plt.MultipleLocator(1))\n\n present.set_xlabel('Patient ID', fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(self.list_attributes[x], fontweight='bold')\n # setting Y-LABEL\n present.title.set_text(self.list_attributes[x]+\" Variation\")\n # setting Title\n\n present = self.plt_0.subplot2grid((2, 5), (1, 0), colspan=5)\n # to plot the present's status\n present.scatter(self.X_reduced_transp[0], self.X_reduced_transp[1], c='red')\n # plotting in the BOTTOM-PLOT\n\n present.set_xlabel(\"Principle Component -1\", fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(\"Principle Component -2\", fontweight='bold')\n # setting Y-LABEL\n\n for x in range(len(self.list_patient_names)): # Naming each patient with ID\n self.list_patient_names[x] = \"Patient \" + str(x)\n # Eg: Patient 0,Patient 1...\n for i, txt in enumerate(self.list_patient_names): # This is used to enumerate the scatter plots label\n present.annotate(txt, (self.X_reduced_transp[0][i] + 1, self.X_reduced_transp[1][i]), fontsize=10, c='black')\n # Coonecting with present",
"def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()"
]
| [
"0.6007617",
"0.5770197",
"0.5752358",
"0.5692155",
"0.5691918",
"0.5655623",
"0.56300396",
"0.55511606",
"0.5392734",
"0.53024817",
"0.5274552",
"0.5269573",
"0.52643716",
"0.5222119",
"0.52014303",
"0.5184559",
"0.51777875",
"0.5176976",
"0.5171472",
"0.5152258",
"0.51350963",
"0.50970036",
"0.5082032",
"0.50565696",
"0.50442654",
"0.503887",
"0.5034732",
"0.5031776",
"0.5023114",
"0.5021802"
]
| 0.6680112 | 0 |
A function to get the necessary axis labels and title for the two-hospitals plot | def get_two_hospital_plot_labels(measurement_type):
if measurement_type == "w":
title = "Waiting times of two hospitals over different distribution of patients"
y_axis_label = "Waiting Time"
else:
title = (
"Blocking times of two hospitals over different distribution of patients"
)
y_axis_label = "Blocking Time"
x_axis_label = "Hospital 1 arrival proportion"
return (x_axis_label, y_axis_label, title) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title",
"def subplot_labels(plot):\n a = plt.text(0.05, 0.8, '(a)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_th.transAxes)\n b = plt.text(0.065, 0.80, '(b)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_lon.transAxes)\n c = plt.text(0.30, 0.80, '(c)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_hist.transAxes)\n d = plt.text(0.065, 0.95, '(d)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_plan.transAxes)\n e = plt.text(0.30, 0.95, '(e)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_lat.transAxes)\n return [a,b,c,d,e]",
"def labels(self):\n return self.label(self.p_y_given_x)",
"def _timeseries_scatter_plot_lbls(self, results_dict, keys, axes, meta):\n if meta[\"var_combination\"].partition(\":\")[-1] == \"tas\":\n against_region = \"Global\"\n else:\n against_region = (\n f\"{self.cfg['region'][2]}$^o$ N-{self.cfg['region'][3]}\"\n f\"$^o$ N latitudinal belt\")\n large_scale_units = self.formatter(\n str(\n iris.load_cube(\n results_dict['large_scale'][keys[0][-1]]).units))\n regional_units = self.formatter(\n str(iris.load_cube(results_dict['regional'][keys[1][-1]]).units))\n xlabel = (f\"{against_region} \"\n f\"{meta['var_combination'].partition(':')[-1].upper()} \"\n f\"[{large_scale_units}]\")\n axes.set_xlabel(xlabel)\n ylabel = (f\"{self.cfg['region_name']} \"\n f\"{meta['var_combination'].partition(':')[0].upper()} \"\n f\"[{regional_units}]\")\n axes.set_ylabel(ylabel)\n\n axes.set_title(f\"Scenario: {meta['title_format']} \\n CMIP5: rval=\"\n f\"{meta['rvalue']['cmip5']:.3f}; \"\n f\"slope={meta['slope']['cmip5']:.3f} \"\n f\"\\n CMIP6: rval={meta['rvalue']['cmip6']:.3f}; \"\n f\"slope={meta['slope']['cmip6']:.3f}\")\n axes.legend(handles=meta[\"legend_elements\"])\n\n long_name_dict = {\"pr\": \"precipitation\", \"tas\": \"temperature\"}\n if meta[\"var_combination\"] == \"pr:tas\":\n suptitle = (f\"{self.cfg['region_name']} {meta['season'].upper()} \"\n f\"precipitation vs global {meta['season'].upper()} \"\n f\"temperature.\\n 10yr rolling means 1960-2100, \"\n f\"Baseline: 1986-2005\")\n plt.suptitle(suptitle)\n else:\n y_combination = meta[\"var_combination\"].partition(':')[0]\n suptitle = (f\"{self.cfg['region_name']} vs {against_region} \"\n f\"{meta['season'].upper()} \"\n f\"{long_name_dict[y_combination]}\"\n f\".\\n 10yr rolling means 1960-2100, \"\n f\"Baseline: 1986-2005\")\n plt.suptitle(suptitle)\n return suptitle",
"def get_plot_for_different_thresholds_labels(measurement_type):\n if measurement_type == \"w\":\n title = \"Waiting times over different thresholds\"\n y_axis_label = \"Waiting Time\"\n elif measurement_type == \"b\":\n title = \"Blocking times over different thresholds\"\n y_axis_label = \"Blocking Time\"\n else:\n title = \"Waiting and blocking times over different thresholds\"\n y_axis_label = \"Waiting and Blocking Time\"\n\n x_axis_label = \"Capacity Threshold\"\n return (x_axis_label, y_axis_label, title)",
"def subplotLabel(axs):\n for ii, ax in enumerate(axs):\n ax.text(-0.2, 1.2, ascii_uppercase[ii], transform=ax.transAxes, fontsize=16, fontweight=\"bold\", va=\"top\")",
"def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')",
"def getLabel2(*args):",
"def getLabel2(*args):",
"def create_labels(dataset, prog_type_dict, other_label, **kwargs):\n top_labels = choose_top_labels(dataset, prog_type_dict, **kwargs)\n\n label_to_idx = {x: top_labels.index(x) for x in top_labels}\n idx_to_label = {v: k for k, v in label_to_idx.items()}\n if other_label != \"\":\n label_to_idx[other_label] = len(top_labels) + 1\n idx_to_label[len(top_labels)] = other_label\n return label_to_idx, idx_to_label",
"def setup_plot(self):\n\n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n \n # Current healthcare status\n self.healthcare_status = \"Normal\"\n \n # Scatter plots to plot people\n self.scat = self.ax.scatter(healthy_x,\n healthy_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"lightsteelblue\", s=10)\n self.scat2 = self.ax.scatter(infected_x,\n infected_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indianred\", s=10)\n self.scat3 = self.ax.scatter(immune_x,\n immune_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"mediumseagreen\", s=10)\n self.scat4 = self.ax.scatter(dead_x,\n dead_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indigo\", s=10)\n # Lists for line graph\n self.infected = []\n self.infected_total = []\n self.deaths = []\n self.frames = []\n self.immunes = []\n self.infected.append(len(infected_x))\n self.deaths.append(len(dead_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.immunes.append(len(immune_x))\n self.frames.append(0)\n\n # Line graph plotting number\n self.total_infected, = self.ax1.plot(self.frames, self.infected_total)\n self.currently_infected, = self.ax1.plot(self.frames, self.infected, c=\"indianred\", label='Currently Infected')\n self.total_deaths, = self.ax1.plot(self.frames, self.deaths, c=\"indigo\", label='Total Dead')\n self.total_immune, = self.ax1.plot(self.frames, self.immunes, c=\"mediumseagreen\", label='Total Immune')\n\n # Code below prints statistics \n if(self.putil.enforce_social_distance_at > 0):\n self.ax1.plot([self.putil.enforce_social_distance_at]*2, [0,self.putil.size],c=\"gold\", label=\"Social Distancing\")\n self.social_distancing_info = (\"At frame \" + str(self.putil.enforce_social_distance_at))\n self.social_distancing_num = str(int(self.putil.social_distance_per * self.putil.size)) + \" or \" + str(self.putil.social_distance_per*100)+\"%\"\n else:\n self.social_distancing_info = (\"Disabled\")\n self.social_distancing_num = \"0 or 0%\"\n\n if(self.putil.enforce_mask_wearing_at > 0):\n self.ax1.plot([self.putil.enforce_mask_wearing_at]*2, [0,self.putil.size],c=\"hotpink\", label=\"Mask Mandate\")\n self.mask_wearing_info = \"At frame \" + str(self.putil.enforce_mask_wearing_at) \n else:\n self.mask_wearing_info = \"Disabled\"\n\n self.ax1.tick_params(axis=\"y\",direction=\"in\", pad=3)\n self.ax1.plot([0,1000],[self.putil.virus.total_healthcare_capacity]*2, c=\"silver\")\n self.ax1.get_xaxis().set_visible(False)\n self.ax1.legend(prop={'size': 8},loc='upper right')\n self.ax2.text(0,1,\"Statistics\", fontsize='large' , fontweight='bold')\n self.ax2.text(0,-0.5, \"Frame:\\nCurrently Infected:\\nHealthy People:\\nImmune People:\\nTotal Deaths:\\nHealthcare Conditions:\")\n self.ax2.text(0.54,-0.5, \"Population:\\nMasks Wearing:\\nSocial Distancing:\\nPeople Distancing:\\nTotal Infected:\\n\")\n self.ax.text(0,1.06, 
\"Simulation\", fontsize='xx-large' , fontweight='bold')\n self.text = self.ax2.text(0.33, -0.5, \"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" %(0,len(infected_x),str(len(healthy_x)) + \" or 0%\", str(len(immune_x)) + \" or 0%\",str(len(dead_x)) + \" or 0%\",self.healthcare_status))\n self.text2 = self.ax2.text(0.81,-0.5,\"%d \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n\n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected, self.total_infected,",
"def __place_statistics_labels(self):\n\n base_x = self.__statistics_coords[\"x\"]\n base_y = self.__statistics_coords[\"y\"]\n active_lines_label = Label(self.__main_window, textvariable=self.__active_lines_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_buses_label = Label(self.__main_window, textvariable=self.__active_buses_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_people_lable = Label(self.__main_window, textvariable=self.__number_of_people_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n session_time_lable = Label(self.__main_window, textvariable=self.__session_time_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 23))\n number_of_people_lable.place(x=base_x, y=base_y)\n active_lines_label.place(x=base_x-35, y=base_y + 35)\n number_of_buses_label.place(x=base_x+54, y=base_y + 69)\n session_time_lable.place(x=base_x-70, y=base_y + 116)",
"def get_extra_label(self, label_name: str, hierarchy: List[str]) -> Any:",
"def get_labels(self):\n return [\"contradiction\", \"entailment\", \"neutral\"]",
"def drawlabels(t, t1):\r\n t.fd(250)\r\n t.pd()\r\n t.write(\"Life\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(12)\r\n t.pd()\r\n t.write(\"Exp.\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(238)\r\n t.right(90)\r\n t.fd(80)\r\n t1.pu()\r\n t1.back(50)\r\n t1.rt(90)\r\n t1.fd(250)\r\n t1.pd()\r\n t1.write(\"Year\", font=(\"Arial\", 10, \"bold\"))\r\n t1.pu()\r\n t1.back(250)\r\n t1.left(90)\r\n t1.fd(50)",
"def SAMT_labels(self):\n \t\t#find basic labels\n \t\tlabels_basic = self.dependency_labels()\n \t\tlabels = Labels(labels_basic)\n \t\treturn labels.SAMT_labels()",
"def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}",
"def CombinedPlotHelper(self,minc=70,maxc=120,num=25):\n levels = np.linspace(minc,maxc,num+1)\n title = textwrap.dedent(\"\"\"\\\n Orography difference between LGM and Modern ICE-5G data\n using {0} meter contour interval\"\"\").format((maxc-minc)/num)\n plt.figure()\n ax = plt.subplot(111)\n contourset = plt.contourf(self.difference_in_ice_5g_orography,\n levels=levels,hold=True)\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Orography difference in meters')\n plt.contour(self.difference_in_ice_5g_orography,levels=contourset.levels,\n colors='black',hold=True)\n ufcntr = plt.contourf(self.difference_in_ice_5g_orography,\n levels=[np.min(self.difference_in_ice_5g_orography),minc],\n colors='white',\n hatches=['/'],hold=True)\n ofcntr = plt.contourf(self.difference_in_ice_5g_orography,\n levels=[maxc,np.max(self.difference_in_ice_5g_orography)],\n colors='white',\n hatches=['\\\\'],hold=True)\n ufartists,uflabels = ufcntr.legend_elements() #@UnusedVariable\n ofartists,oflabels = ofcntr.legend_elements() #@UnusedVariable\n uflabels=['Difference $\\\\leq {0}$'.format(minc)]\n oflabels=['${0} <$ Difference'.format(maxc)]\n artists = ufartists + ofartists\n labels = uflabels + oflabels\n plt.title(title)\n pts.set_ticks_to_zero()\n axbounds = ax.get_position()\n #Shrink box by 5%\n ax.set_position([axbounds.x0,axbounds.y0 + axbounds.height*0.05,\n axbounds.width,axbounds.height*0.95])\n ax.legend(artists,labels,loc='upper center',\n bbox_to_anchor=(0.5,-0.025),fancybox=True,ncol=2)\n #if self.save:\n #plt.savefig('something')\n print(\"Combined plot created\")",
"def show_plot(self):\n label_1 = (self.own_name_1 + \"'s account\")\n label_2 = (self.own_name_2 + \"'s account\")\n clusters = 3\n counts_1 = (self.op_full_name_count_1, self.op_first_name_count_1, self.op_last_name_count_1)\n counts_2 = (self.op_full_name_count_2, self.op_first_name_count_2, self.op_last_name_count_2)\n fig, ax = plt.subplots()\n index = np.arange(clusters)\n bar_width = 0.2\n opacity = 0.5\n rects1 = plt.bar(index, counts_1, bar_width, alpha=opacity, color=\"b\", label=label_1)\n rects2 = plt.bar(index + bar_width, counts_2, bar_width, alpha=opacity, color=\"g\", label=label_2)\n #plt.xlabel(\"Name forms\")\n plt.ylabel(\"Number of references\")\n plt.title(\"Reference of opponents name\")\n plt.xticks(index + bar_width, (\"Opponent's Full Name\", \"Opponent's First Name only\", \"Opponent's Last name only\"))\n plt.legend()\n plt.tight_layout()\n plt.show()",
"def _curve_labels(self, x_axis, sample, ylabel):\n return str(sample), x_axis.capitalize(), sample",
"def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()",
"def get_labels(self):\n if self.option == \"term\":\n return ['platform characteristics', 'atmospheric winds', 'radio wave','weather events', 'geomagnetism', 'atmospheric electricity','microwave', 'atmospheric temperature', 'atmospheric water vapor','atmospheric pressure', 'aerosols', 'atmospheric radiation','atmospheric chemistry', 'precipitation', 'sensor characteristics','radar', 'infrared wavelengths', 'visible wavelengths','weather/climate advisories', 'clouds', 'lidar', 'ocean optics','ultraviolet wavelengths', 'cryospheric indicators','land use/land cover', 'topography', 'surface thermal properties','spectral/engineering', 'soils', 'snow/ice', 'geothermal dynamics','natural hazards', 'surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics']\n elif self.option == \"mostdepth\":\n return ['flight data logs', 'turbulence', 'radio wave flux', 'lightning', 'magnetic field', 'atmospheric conductivity', 'electric field', 'data synchronization time', 'brightness temperature', 'vertical profiles', 'water vapor profiles', 'air temperature', 'upper level winds', 'atmospheric pressure measurements', 'upper air temperature', 'humidity', 'dew point temperature', 'aerosol particle properties', 'emissivity', 'trace gases/trace species', 'liquid precipitation', 'cloud liquid water/ice', 'microwave radiance', 'sensor counts', 'total pressure', 'airspeed/ground speed', 'total temperature', 'static pressure', 'wind speed', 'wind direction', 'radar reflectivity', 'doppler velocity', 'infrared imagery', 'visible imagery', 'water vapor', 'vertical wind velocity/speed', 'aerosol backscatter', 'weather forecast', 'tropical cyclones', 'visible radiance', 'infrared radiance', 'total precipitable water', 'boundary layer temperature', 'atmospheric temperature indices', 'cloud height', 'flight level winds', 'cloud droplet distribution', 'cloud droplet concentration/size', 'cloud condensation nuclei', 'cloud microphysics', 'hydrometeors', 'ozone', 'wind profiles', 'cloud base temperature', 'cloud base height', 'liquid water 
equivalent', 'solar radiation', 'planetary boundary layer height', 'surface winds', 'precipitation amount', 'precipitation rate', 'surface pressure', 'rain', 'cloud optical depth/thickness', 'aerosol extinction', 'aerosol optical depth/thickness', 'cirrus cloud systems', 'lidar depolarization ratio', 'radar backscatter', 'radar cross-section', 'return power', 'mean radial velocity', 'radiance', 'air quality', 'climate advisories', 'atmospheric emitted radiation', 'optical depth/thickness', 'surface temperature', 'ultraviolet flux', 'spectrum width', 'microwave imagery', 'lidar backscatter', 'relative humidity', 'u/v wind components', 'wind speed/wind direction', 'radar imagery', 'snow depth', 'land use/land cover classification', 'digital elevation/terrain model (dem)', 'snow', 'droplet size', 'droplet concentration/size', 'drizzle', 'precipitation anomalies', 'snow water equivalent', 'solid precipitation', 'total surface precipitation rate', 'particle size distribution', 'skin temperature', 'attitude characteristics', 'land surface temperature', 'hail', 'reflectance', 'soil moisture/water content', 'soil temperature', 'soil bulk density', 'surface roughness', 'present weather', 'snow density', 'ambient temperature', 'aerosol forward scatter', 'floods', 'snow cover', 'sigma naught', 'precipitable water', 'stage height', 'rivers/streams', 'shortwave radiation', 'photosynthetically active radiation', 'longwave radiation', 'net radiation', 'hourly precipitation amount', '24 hour precipitation amount', 'soil moisture', 'satellite orbits/revolution', 'sea surface temperature', 'heat flux', 'latent heat flux', 'cloud fraction', '3 and 6 hour precipitation amount', 'geopotential height', 'particulate matter', 'particle images', 'water vapor indices', 'horizontal wind velocity/speed', 'electrical conductivity', 'dissolved carbon dioxide', 'hurricanes', 'tropical cyclone track', 'convective clouds/systems (observed/analyzed)', 'cloud top height', 'viewing geometry', 'temperature profiles', 'vertical wind shear', 'wind shear', 'carbon monoxide', 'sea level pressure', 'water vapor tendency', 'potential temperature', 'angstrom exponent', 'ultraviolet radiation', 'solar irradiance', 'scattering', 'absorption', 'water vapor mixing ratio profiles', 'sea surface temperature indices', 'extreme eastern tropical pacific sst', 'sedimentation', 'erosion', 'sediment transport', 'sediments', 'tropopause', 'ocean chemistry', 'ocean optics', 'ocean temperature', 'salinity/density', 'pigments', 'ocean color', 'attenuation/transmission', 'inorganic carbon', 'organic carbon', 'photosynthetically available radiation', 'chlorophyll', 'optical depth', 'fluorescence', 'vegetation index', 'gelbstoff', 'phytoplankton', 'vegetation index2', 'cloud precipitable water', 'landscape ecology', 'ultraviolet radiance', 'cloud ceiling', 'aerosol radiance', 'carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles', 'organic particles', 'sulfate particles', 'radiative flux', 'transmittance', 'atmospheric stability', 'cloud asymmetry', 'cloud frequency', 'cloud top pressure', 'cloud top temperature', 'cloud vertical distribution', 'cloud emissivity', 'cloud radiative forcing', 'cloud reflectance', 'rain storms', 'reflected infrared', 'thermal infrared', 'incoming solar radiation', 'clouds', 'cloud properties', 'cloud types', 'orbital characteristics', 'sensor characteristics', 'maximum/minimum temperature', 'condensation', 'platform characteristics', 'geolocation', 'geodetics', 'coordinate reference system', 'aerosols', 
'topographical relief maps', 'terrain elevation', 'normalized difference vegetation index (ndvi)', 'infrared flux', 'visible flux', 'albedo', 'land use/land cover', 'topography', 'lidar', 'lidar waveform', 'plant phenology', 'vegetation cover', 'crop/plant yields', 'land use classes', 'landscape patterns', 'forest harvesting and engineering', 'forest management', 'total surface water', 'agricultural plant science', 'photosynthesis', 'primary production', 'leaf characteristics', 'evapotranspiration', 'fire occurrence', 'surface thermal properties', 'canopy characteristics', 'evergreen vegetation', 'crown', 'deciduous vegetation', 'anisotropy', 'fire ecology', 'biomass burning', 'wildfires', 'topographical relief', 'burned area', 'surface radiative properties', 'environmental sustainability', 'boundaries', 'anthropogenic/human influenced ecosystems', 'emissions', 'sulfur dioxide', 'population', 'infrastructure', 'environmental assessments', 'public health', 'conservation', 'agriculture production', 'administrative divisions', 'economic resources', 'socioeconomics', 'lake/pond', 'rivers/stream', 'political divisions', 'environmental vulnerability index (evi)', 'ecosystems', 'urban areas', 'sustainability', 'treaty agreements/results', 'human settlements', 'population estimates', 'nitrogen dioxide', 'cropland', 'pasture', 'particulates', 'cyclones', 'mortality', 'environmental impacts', 'droughts', 'earthquakes', 'population distribution', 'fertilizers', 'animal manure and waste', 'urbanization/urban sprawl', 'landslides', 'avalanche', 'urban lands', 'mangroves', 'volcanic eruptions', 'pesticides', 'population size', 'population density', 'lakes/reservoirs', 'surface water', 'rural areas', 'infant mortality rates', 'amphibians', 'mammals', 'carbon', 'sulfur oxides', 'methane', 'non-methane hydrocarbons/volatile organic compounds', 'nitrogen oxides', 'natural gas', 'coal', 'coastal elevation', 'biodiversity functions', 'nuclear radiation exposure', 'radiation exposure', 'poverty levels', 'malnutrition', 'wetlands', 'sea level rise', 'vulnerability levels/index', 'ground water', 'snow/ice', 'electricity', 'energy production/use', 'sustainable development', 'deforestation', 'household income', 'discharge/flow', 'hydropattern', 'nitrogen', 'phosphorus', 'carbon dioxide', 'alpine/tundra', 'forests', 'vegetation', 'permafrost', 'nutrients', 'plant characteristics', 'leaf area index (lai)', 'soil gas/air', 'ammonia', 'nitrous oxide', 'ecosystem functions', 'litter characteristics', 'soil chemistry', 'soil respiration', 'active layer', 'soil depth', 'cation exchange capacity', 'organic matter', 'soil porosity', 'soil texture', 'permafrost melt', 'land subsidence', 'freeze/thaw', 'surface water features', 'chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride', 'molecular hydrogen', 'sulfur compounds', 'fire models', 'biomass', 'dominant species', 'vegetation species', 'sulfur', 'tree rings', 'soil classification', 'heat index', 'sea ice concentration', 'ocean heat budget', 'reforestation', 'even-toed ungulates', 'species recruitment', 'population dynamics', 'range changes', 'topographic effects', 'land resources', 'river ice depth/extent', 'snow melt', 'river ice', 'animal commodities', 'animal ecology and behavior', 'phenological changes', 'water depth', 'inundation', 'forest fire science', 'biogeochemical cycles', 'radiative forcing', 'soil heat budget', 'drainage', 'respiration rate', 'river/lake ice breakup', 'river/lake ice freeze', 'reclamation/revegetation/restoration', 'permafrost 
temperature', 'indigenous/native species', 'fire dynamics', 'lichens', 'plants', 'plant succession', 'carbon flux', 'coastal', 'salt marsh', 'degradation', 'altitude', 'carbon and hydrocarbon compounds', 'halocarbons and halogens', 'forest composition/vegetation structure', 'water vapor indicators', 'barometric altitude', 'atmospheric water vapor', 'terrestrial ecosystems', 'volatile organic compounds', 'boundary layer winds', 'forest fire danger index', 'periglacial processes', 'landscape processes', 'evaporation', 'soil horizons/profile', 'shrubland/scrub', 'soil ph', 'soils', 'soil water holding capacity', 'community structure', 'pingo', 'soil color', 'virtual temperature', 'formaldehyde', 'hydroxyl', 'photolysis rates', 'cloud dynamics', 'nitric oxide', 'molecular oxygen', 'smog', 'peroxyacyl nitrate', 'hydrogen compounds', 'nitrogen compounds', 'oxygen compounds', 'stable isotopes', 'chemical composition', 'actinic flux', 'tropospheric ozone', 'fossil fuel burning', 'industrial emissions', 'denitrification rate', 'sunshine', 'runoff', 'soil structure', 'mosses/hornworts/liverworts', 'peatlands', 'hydraulic conductivity', 'snow/ice temperature', 'vegetation water content', 'discharge', 'chlorophyll concentrations', 'outgoing longwave radiation', 'geomorphic landforms/processes', 'soil compaction', 'soil impedance', 'canopy transmittance', 'water table', 'decomposition', 'water temperature', 'dissolved gases', 'total dissolved solids', 'agricultural expansion', 'forest science', 'pressure tendency', 'visibility', 'biomass dynamics', 'agricultural lands', 'grasslands', 'savannas', 'grazing dynamics/plant herbivory', 'herbivory', 'paleoclimate reconstructions', 'drought indices', 'fire weather index', 'animal yields', 'multivariate enso index', 'dissolved solids', 'ocean currents', 'salinity', 'coastal processes', 'atmospheric pressure', 'afforestation/reforestation', 'fresh water river discharge', 'surface water chemistry', 'drainage basins', 'resource development site', 'dunes', 'flood plain', 'endangered species', 'precipitation indices', 'temperature indices', 'forest yields', 'stratigraphic sequence', 'freeze/frost', 'frost', 'hydrogen cyanide', 'land management', 'nutrient cycling', 'industrialization', 'suspended solids', 'deserts', 'weathering', 'gas flaring', 'atmospheric temperature', 'ice extent', 'fraction of absorbed photosynthetically active radiation (fapar)', 'marshes', 'swamps', 'lake ice', 'atmospheric winds', 'watershed characteristics', 'transportation', 'soil rooting depth', 'isotopes', 'cultural features', 'consumer behavior', 'boundary surveys', 'aquifers', 'land productivity', 'water quality/water chemistry', 'sediment composition', 'dissolved oxygen', 'surface water processes/measurements', 'turbidity', 'conductivity', 'ph', 'calcium', 'magnesium', 'potassium', 'micronutrients/trace elements', 'social behavior', 'sulfate', 'sediment chemistry', 'biogeochemical processes', 'water ion concentrations', 'cropping systems', 'percolation', 'groundwater chemistry', 'reforestation/revegetation', 'species/population interactions', 'soil infiltration', 'alkalinity', 'soil fertility', 'phosphorous compounds', 'radioisotopes', 'cooling degree days', 'angiosperms (flowering plants)', 'glacial landforms', 'glacial processes', 'contour maps', 'estuaries', 'methane production/use', 'natural gas production/use', 'petroleum production/use', 'visualization/image processing', 'subsetting/supersetting', 'transformation/conversion', 'forest mensuration', 'acid deposition', 
'differential pressure', 'precipitation', 'marine ecosystems', 'consumption rates', 'radio wave', 'soil organic carbon (soc)', 'soil erosion', 'halocarbons', 'trace elements/trace metals', 'biomass energy production/use', 'riparian wetlands', 'soil consistence', 'snow stratigraphy', 'thermal conductivity', 'estuary', 'tidal height', 'plant diseases/disorders/pests', 'layered precipitable water', 'atmospheric chemistry', 'water vapor concentration profiles', 'specific humidity', 'total runoff', 'pressure thickness', 'wind stress', 'atmospheric heating', 'conduction', 'hydrogen chloride', 'nitric acid', 'radar', 'land surface/agriculture indicators', 'satellite soil moisture index', 'chlorine nitrate', 'chlorofluorocarbons', 'dinitrogen pentoxide', 'antenna temperature', 'glaciers', 'ice sheets', 'dimethyl sulfide', 'potential vorticity', 'ice fraction', 'atmospheric radiation', 'runoff rate', 'temperature tendency', 'wind dynamics', 'wind direction tendency', 'base flow', 'bromine monoxide', 'chlorine monoxide', 'methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy', 'cloud base pressure', 'temperature anomalies', 'nitrate', 'ocean mixed layer', 'precipitation trends', 'temperature trends', 'convection', 'ground ice', 'oxygen', 'phosphate', 'solar induced fluorescence', 'chlorine dioxide', 'sun-earth interactions', 'uv aerosol index', 'volcanic activity', 'potential evapotranspiration', 'ultraviolet wavelengths', 'ice temperature', 'sea surface skin temperature', 'sea surface height', 'sublimation', 'convective surface precipitation rate', 'hydrogen fluoride', 'airglow', 'energy deposition', 'x-ray flux', 'electron flux', 'proton flux', 'magnetic fields/magnetic currents']\n else:\n return ['platform characteristics', 'atmospheric winds','radio wave', 'weather events', 'geomagnetism','atmospheric electricity', 'microwave', 'atmospheric temperature','atmospheric water vapor', 'atmospheric pressure', 'aerosols','atmospheric radiation', 'atmospheric chemistry', 'precipitation','sensor characteristics', 'radar', 'infrared wavelengths','visible wavelengths', 'weather/climate advisories', 'clouds','lidar', 'ocean optics', 'ultraviolet wavelengths','cryospheric indicators', 'land use/land cover', 'topography','surface thermal properties', 'spectral/engineering', 'soils','snow/ice', 'geothermal dynamics', 'natural hazards','surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic 
landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics','flight data logs','wind dynamics', 'radio wave flux', 'lightning', 'magnetic field','atmospheric conductivity', 'electric field','data synchronization time', 'brightness temperature','upper air temperature', 'water vapor profiles','surface temperature', 'upper level winds','atmospheric pressure measurements', 'water vapor indicators','aerosol particle properties', 'emissivity','trace gases/trace species', 'liquid precipitation','cloud microphysics', 'microwave radiance', 'sensor counts','total pressure', 'airspeed/ground speed', 'total temperature','static pressure', 'humidity', 'radar reflectivity','doppler velocity', 'infrared imagery', 'visible imagery','aerosol backscatter', 'weather forecast', 'tropical cyclones','visible radiance', 'infrared radiance','atmospheric temperature indices', 'cloud droplet distribution','cloud condensation nuclei', 'hydrometeors', 'oxygen compounds','wind profiles', 'liquid water equivalent', 'solar radiation','planetary boundary layer height', 'surface winds','precipitation amount', 'precipitation rate', 'surface pressure','aerosol extinction', 'aerosol optical depth/thickness','tropospheric/high-level clouds (observed/analyzed)','lidar depolarization ratio', 'radar backscatter','radar cross-section', 'return power', 'radial velocity','radiance', 'climate advisories', 'atmospheric emitted radiation','optical depth/thickness', 'ultraviolet flux', 'spectrum width','microwave imagery', 'lidar backscatter', 'radar imagery','snow depth', 'land use/land cover classification','terrain elevation', 'solid precipitation', 'droplet size','droplet concentration/size', 'precipitation anomalies','snow water equivalent', 'total surface precipitation rate','skin temperature', 'water vapor', 'attitude characteristics','land surface temperature', 'reflectance','soil moisture/water content', 'soil temperature','soil bulk density', 'surface roughness', 'present weather','snow density', 'geothermal temperature','aerosol forward scatter', 'floods', 'snow cover', 'sigma naught','precipitable water', 'surface water processes/measurements','surface water features', 'shortwave radiation','photosynthetically active radiation', 'longwave radiation','net radiation', 'flight level winds', 'soil moisture','satellite orbits/revolution', 'heat flux','precipitation profiles', 'geopotential height','particulate matter', 'particle images', 'water vapor indices','electrical conductivity', 'gases', 'sea surface temperature','convective clouds/systems (observed/analyzed)','viewing geometry', 'wind shear','carbon and hydrocarbon compounds', 'sea level pressure','water vapor processes', 'ultraviolet radiation','solar irradiance', 'scattering', 'absorption','sea surface temperature indices', 'sedimentation', 'erosion','sediment transport', 'sediments', 'tropopause', 'nan', 'pigments','attenuation/transmission', 'inorganic carbon', 'organic carbon','photosynthetically available radiation', 'chlorophyll','optical depth', 'fluorescence', 'vegetation index', 'gelbstoff','plankton', 'vegetation index2', 'landscape ecology','ultraviolet radiance', 'aerosol radiance','carbonaceous aerosols', 
'dust/ash/smoke', 'nitrate particles','organic particles', 'sulfate particles', 'radiative flux','transmittance', 'atmospheric stability','cloud radiative transfer', 'rain storms', 'reflected infrared','thermal infrared', 'incoming solar radiation', 'cloud types','orbital characteristics', 'geolocation','coordinate reference system', 'infrared flux', 'visible flux','albedo', 'lidar waveform', 'plant phenology', 'vegetation cover','crop/plant yields', 'land use classes', 'landscape patterns','forest harvesting and engineering', 'forest management','ecosystem functions', 'leaf characteristics', 'fire ecology','total surface water', 'primary production', 'photosynthesis','canopy characteristics', 'evergreen vegetation', 'crown','deciduous vegetation', 'anisotropy', 'biomass burning','wildfires', 'topographical relief','environmental sustainability','anthropogenic/human influenced ecosystems', 'emissions','sulfur compounds', 'environmental assessments', 'conservation','agriculture production', 'administrative divisions','freshwater ecosystems', 'political divisions', 'urban areas','treaty agreements/results', 'population estimates','nitrogen compounds', 'particulates', 'mortality', 'droughts','earthquakes', 'population distribution', 'fertilizers','animal manure and waste', 'urbanization/urban sprawl','landslides', 'avalanche', 'mangroves', 'volcanic eruptions','pesticides', 'population size', 'population density','rural areas', 'amphibians', 'mammals', 'carbon', 'sulfur oxides','land management', 'natural gas', 'sedimentary rocks','coastal elevation', 'community dynamics','nuclear radiation exposure', 'radiation exposure','poverty levels', 'malnutrition', 'sea level rise','vulnerability levels/index', 'electricity','energy production/use', 'sustainable development','deforestation', 'household income', 'nitrogen', 'phosphorus','terrestrial ecosystems', 'permafrost', 'nutrients','plant characteristics', 'soil gas/air', 'litter characteristics','soil chemistry', 'soil respiration', 'active layer', 'soil depth','cation exchange capacity', 'organic matter', 'soil porosity','soil texture', 'permafrost melt','ground water processes/measurements', 'freeze/thaw','halocarbons and halogens', 'hydrogen compounds', 'biomass','dominant species', 'vegetation species', 'sulfur', 'tree rings','soil classification', 'sea ice concentration', 'reforestation','species/population interactions', 'range changes','topographic effects', 'land resources', 'river ice depth/extent','snow melt', 'river ice', 'animal ecology and behavior','phenological changes', 'forest fire science', 'radiative forcing','soil heat budget', 'river/lake ice breakup','river/lake ice freeze', 'reclamation/revegetation/restoration','lichens', 'marine ecosystems', 'coastal landforms', 'degradation','forest composition/vegetation structure', 'barometric altitude','volatile organic compounds', 'forest fire danger index','periglacial processes', 'landscape processes','soil horizons/profile', 'soil ph', 'soil water holding capacity','fluvial landforms', 'soil color', 'glacial processes','photochemistry', 'cloud dynamics', 'nitrogen oxides', 'smog','chemical composition', 'actinic flux', 'tropospheric ozone','fossil fuel burning', 'industrial emissions','denitrification rate', 'sunshine', 'soil structure','mosses/hornworts/liverworts', 'hydraulic conductivity','snow/ice temperature', 'water characteristics','outgoing longwave radiation', 'soil compaction', 'soil impedance','canopy transmittance', 'ground water features', 'solids','agricultural 
expansion', 'pressure tendency', 'visibility','herbivory', 'paleoclimate reconstructions', 'drought indices','fire weather index', 'animal yields', 'teleconnections','carbon dioxide', 'dissolved solids', 'ocean currents', 'salinity','afforestation/reforestation', 'fresh water river discharge','surface water chemistry', 'aeolian landforms','precipitation indices', 'temperature indices', 'forest yields','stratigraphic sequence', 'freeze/frost', 'frost','industrialization', 'ice core records', 'suspended solids','weathering', 'gas flaring', 'ice extent', 'biogeochemical cycles','lake ice', 'isotopes', 'watershed characteristics','transportation', 'soil rooting depth', 'geochemical properties','carbon monoxide', 'cultural features', 'consumer behavior','boundary surveys', 'land productivity', 'sediment composition','calcium', 'magnesium', 'potassium','micronutrients/trace elements', 'sediment chemistry','biogeochemical processes', 'cropping systems','groundwater chemistry', 'reforestation/revegetation','soil infiltration', 'soil fertility','angiosperms (flowering plants)', 'glacial landforms','forest mensuration', 'acid deposition', 'differential pressure','soil erosion', 'trace elements/trace metals', 'soil consistence','snow stratigraphy', 'thermal conductivity', 'estuaries','tidal height', 'plant diseases/disorders/pests','pressure thickness', 'atmospheric heating', 'conduction','evaporation', 'turbulence', 'wind stress','satellite soil moisture index', 'antenna temperature', 'glaciers','ice sheets', 'nitrate', 'ocean mixed layer','precipitation indicators', 'temperature indicators', 'ground ice','alkalinity', 'dissolved gases', 'oxygen', 'ph', 'phosphate','solar induced fluorescence', 'volcanic activity','ice temperature', 'sea surface height', 'airglow','energy deposition', 'x-ray flux', 'electron flux', 'proton flux','magnetic fields/magnetic currents', 'vertical profiles','air temperature', 'dew point temperature','cloud liquid water/ice', 'wind speed', 'wind direction','vertical wind velocity/speed', 'total precipitable water','boundary layer temperature', 'cloud height','cloud droplet concentration/size', 'ozone','cloud base temperature', 'cloud base height', 'rain','cloud optical depth/thickness', 'cirrus/systems','mean radial velocity', 'relative humidity', 'u/v wind components','wind speed/wind direction','digital elevation/terrain model (dem)', 'snow', 'drizzle','particle size distribution', 'hail', 'ambient temperature','stage height', 'rivers/streams', 'hourly precipitation amount','24 hour precipitation amount', 'latent heat flux','cloud fraction', '3 and 6 hour precipitation amount','horizontal wind velocity/speed', 'dissolved carbon dioxide','hurricanes', 'tropical cyclone track', 'cloud top height','temperature profiles', 'vertical wind shear','water vapor tendency', 'potential temperature','angstrom exponent', 'water vapor mixing ratio profiles','extreme eastern tropical pacific sst', 'phytoplankton','cloud precipitable water', 'cloud asymmetry', 'cloud ceiling','cloud frequency', 'cloud top pressure', 'cloud top temperature','cloud vertical distribution', 'cloud emissivity','cloud radiative forcing', 'cloud reflectance','maximum/minimum temperature', 'condensation','topographical relief maps', 'evapotranspiration','fire occurrence', 'burned area', 'sulfur dioxide', 'lake/pond','rivers/stream', 'nitrogen dioxide', 'agricultural lands','cyclones', 'urban lands', 'lakes/reservoirs','infant mortality rates', 'methane','non-methane hydrocarbons/volatile organic compounds', 
'coal','biodiversity functions', 'wetlands', 'discharge/flow','hydropattern', 'alpine/tundra', 'forests','leaf area index (lai)', 'ammonia', 'nitrous oxide','land subsidence', 'normalized difference vegetation index (ndvi)','chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride','molecular hydrogen', 'fire models', 'heat index','even-toed ungulates', 'species recruitment','population dynamics', 'water depth', 'inundation', 'drainage','respiration rate', 'permafrost temperature','indigenous/native species', 'fire dynamics', 'plant succession','coastal', 'salt marsh', 'boundary layer winds', 'shrubland/scrub','community structure', 'pingo', 'virtual temperature','formaldehyde', 'hydroxyl', 'photolysis rates', 'nitric oxide','molecular oxygen', 'peroxyacyl nitrate', 'stable isotopes','runoff', 'vegetation water content', 'discharge','chlorophyll concentrations', 'water table', 'decomposition','water temperature', 'total dissolved solids', 'biomass dynamics','grasslands', 'savannas', 'grazing dynamics/plant herbivory','multivariate enso index', 'drainage basins','resource development site', 'dunes', 'flood plain','endangered species', 'hydrogen cyanide', 'nutrient cycling','deserts','fraction of absorbed photosynthetically active radiation (fapar)','aquifers', 'dissolved oxygen', 'turbidity', 'conductivity','sulfate', 'water ion concentrations', 'percolation','phosphorous compounds', 'radioisotopes', 'cooling degree days','contour maps', 'methane production/use','natural gas production/use', 'petroleum production/use','consumption rates', 'soil organic carbon (soc)', 'halocarbons','biomass energy production/use', 'estuary','layered precipitable water', 'water vapor concentration profiles','hydrogen chloride', 'nitric acid', 'chlorine nitrate','chlorofluorocarbons', 'dinitrogen pentoxide', 'dimethyl sulfide','vorticity', 'ice fraction', 'temperature tendency','wind direction tendency', 'bromine monoxide', 'chlorine monoxide','methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy','cloud base pressure', 'temperature anomalies','precipitation trends', 'temperature trends', 'convection','chlorine dioxide', 'uv aerosol index','sea surface skin temperature', 'sublimation','convective surface precipitation rate', 'hydrogen fluoride']",
"def lyft_labels():\n\n return {\n 0: 'None',\n 7: 'Roads',\n 10: 'Vehicles'\n }",
"def get_label(self, hierarchy: List[str]) -> Any:",
"def user_labels_second(*args):\n return _ida_hexrays.user_labels_second(*args)",
"def get_labels(self):\r\n return None",
"def _get_l2_label(self):\n return self.__l2_label",
"def get_labels(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = reversed(data)\n two_hours = [str(elem['pub_date'])[10:19] for elem in two_hours]\n return two_hours",
"def get_labels(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = reversed(data)\n two_hours = [str(elem['pub_date'])[10:19] for elem in two_hours]\n return two_hours",
"def get_labels(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = reversed(data)\n two_hours = [str(elem['pub_date'])[10:19] for elem in two_hours]\n return two_hours"
]
| [
"0.62116903",
"0.6029856",
"0.6003597",
"0.59680206",
"0.5843733",
"0.5718131",
"0.56721735",
"0.5667398",
"0.5667398",
"0.56606805",
"0.5659642",
"0.5647714",
"0.56390446",
"0.5638669",
"0.5621482",
"0.56093115",
"0.55882025",
"0.5574133",
"0.55654013",
"0.555358",
"0.5550554",
"0.5540116",
"0.55164975",
"0.5508514",
"0.5465427",
"0.5447272",
"0.54450965",
"0.5431601",
"0.5431601",
"0.5431601"
]
| 0.7466757 | 0 |
Make a plot of the waiting/blocking times of two hospitals that share a joint arrival rate of ambulance patients. In other words, it plots the waiting/blocking times of patients based on how the ambulance patients are distributed among the two hospitals | def make_plot_two_hospitals_arrival_split(
lambda_2,
lambda_1_1,
lambda_1_2,
mu_1,
mu_2,
num_of_servers_1,
num_of_servers_2,
threshold_1,
threshold_2,
measurement_type="b",
seed_num_1=None,
seed_num_2=None,
warm_up_time=100,
trials=1,
accuracy=10,
runtime=1440,
):
    hospital_times_1 = []
    hospital_times_2 = []
    # Consider each possible split of the total ambulance arrival rate (lambda_2)
    # between the two hospitals, excluding the two all-or-nothing endpoints
    all_arrival_rates = np.linspace(0, lambda_2, accuracy + 1)
    for arrival_rate_1 in all_arrival_rates[1:-1]:
        arrival_rate_2 = lambda_2 - arrival_rate_1
        # Simulate each hospital separately with its share of the ambulance arrivals
        times_1 = get_multiple_runs_results(
arrival_rate_1,
lambda_1_1,
mu_1,
num_of_servers_1,
threshold_1,
seed_num_1,
warm_up_time,
trials,
runtime,
)
times_2 = get_multiple_runs_results(
arrival_rate_2,
lambda_1_2,
mu_2,
num_of_servers_2,
threshold_2,
seed_num_2,
warm_up_time,
trials,
runtime,
)
hospital_times_1, hospital_times_2 = update_hospitals_lists(
hospital_times_1, hospital_times_2, times_1, times_2, measurement_type
)
    x_axis_label, y_axis_label, title = get_two_hospital_plot_labels(measurement_type)
    # x-axis: proportion of the total ambulance arrivals that are sent to hospital 1
    x_labels = all_arrival_rates[1:-1] / all_arrival_rates[-1]
plt.figure(figsize=(23, 10))
waiting_time_plot = plt.plot(x_labels, hospital_times_1, ls="solid", lw=1.5)
plt.plot(x_labels, hospital_times_2, ls="solid", lw=1.5)
plt.legend(["Hospital 1", "Hospital 2"], fontsize="x-large")
plt.title(title, fontsize=18)
plt.xlabel(x_axis_label, fontsize=15, fontweight="bold")
plt.ylabel(y_axis_label, fontsize=15, fontweight="bold")
return waiting_time_plot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comp_time_plot(p1=database['K+'], p2=database['pi+'], pmax=80, plot=True):\r\n dt = []\r\n p_range = np.linspace(10, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in p_range:\r\n t1_per_m = 76.273/(beta(p, m1)*gamma(p, m1)*c)\r\n t2_per_m = 76.273/(beta(p, m2)*gamma(p, m2)*c)\r\n dt.append(abs(t1_per_m - t2_per_m)*1e12)\r\n dt_12_5 = dt[np.argmin(abs(p_range-12.5))]\r\n dt_75 = dt[np.argmin(abs(p_range-75))]\r\n ratio = dt_12_5/dt_75\r\n if plot==True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(p_range, dt, 'b', label=r'$\\Delta t$')\r\n ax.axvline(12.5, color='r', label='p=12.5 GeV')\r\n ax.axvline(75, color='g', label='p=75 GeV')\r\n ax.set_xlim(10, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n# ax.set_yscale('log')\r\n ax.set_ylabel(r'$\\Delta t$ / ps', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta t$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.legend(fontsize=20)\r\n text = 'dt(12.5) = {0:.2f} ps, '.format(dt_12_5)\r\n text += 'dt(75) = {0:.2f} ps, '.format(dt_75)\r\n text += 'ratio = {0:.3f}'.format(ratio)\r\n plt.show()\r\n print(text)\r\n return [dt_12_5, dt_75, ratio]",
"def plot_joint_angles(t_start,t_stop):\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time_plot = time[index_start:index_end+1]\n joint_lh_positions = joint_lh_positions[index_start:index_end+1,:]\n joint_rh_positions = joint_rh_positions[index_start:index_end+1,:]\n\n # Example to plot joint trajectories.\n # Feel free to change or use your own plot tools\n plt.figure()\n plt.subplot(3,1,1)\n plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 0]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 0]))\n plt.ylabel('Hip Angle [deg]')\n plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n plt.subplot(3,1,2)\n plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 1]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 1]))\n plt.ylabel('Knee Angle [deg]')\n plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n plt.subplot(3,1,3)\n plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 2]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 2]))\n plt.grid('on')\n plt.ylabel('Ankle Angle [deg]')\n plt.legend(['Left','Right'],loc='upper right')\n plt.xlabel('Time [s]')\n\n return",
"def plot_joint_angles_with_contact(t_start,t_stop):\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time_plot = time[index_start:index_end+1]\n joint_lh_positions = joint_lh_positions[index_start:index_end+1,:]\n joint_rh_positions = joint_rh_positions[index_start:index_end+1,:]\n \n gait = foot_r_contact[index_start:index_end+1,:]\n foot_l_contact = foot_l_contact[index_start:index_end+1,:]\n \n# gait = np.hstack((foot_r_contact, foot_l_contact))\n\n # Example to plot joint trajectories.\n # Feel free to change or use your own plot tools\n plt.figure()\n ax = plt.subplot(3,1,1)\n# plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 0]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 0]),'r')\n plt.ylabel('Hip Angle [deg]')\n# plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n for t, g in enumerate(gait):\n for l, gait_l in enumerate(g):\n if gait_l:\n add_patch(ax, time_plot[t], -300, width=0.01, height=150)\n ax = plt.subplot(3,1,2)\n# plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 1]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 1]),'r')\n plt.ylabel('Knee Angle [deg]')\n# plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n for t, g in enumerate(gait):\n for l, gait_l in enumerate(g):\n if gait_l:\n add_patch(ax, time_plot[t], -600, width=0.01, height=150)\n ax = plt.subplot(3,1,3)\n# plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 2]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 2]),'r')\n plt.grid('on')\n for t, g in enumerate(gait):\n for l, gait_l in enumerate(g):\n if gait_l:\n add_patch(ax, time_plot[t], -300, width=0.01, height=150)\n plt.ylabel('Ankle Angle [deg]')\n# plt.legend(['Left','Right'],loc='upper right')\n plt.xlabel('Time [s]')\n\n return",
"def task_2():\n\n # To store the list of speeds to plot\n list_of_speeds = []\n list_of_times = []\n list_of_time_difference = [0]\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1, 5):\n list_of_speeds.append(i)\n list_of_times.append(((DISTANCE / i) * 60))\n\n for i in range(1, len(list_of_times)):\n list_of_time_difference.append(list_of_times[i-1] - list_of_times[i])\n\n plt.plot(list_of_speeds, list_of_time_difference)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time saved (in minutes)\")\n plt.show()",
"def estimate_tr_slot(br_data_df, fab_started_at, leeway, agent_df):\r\n a = br_data_df.loc[0, 'AVG(ca_op_time)']\r\n b = br_data_df.loc[0, 'AVG(tr_op_time)']\r\n # br_data_df.loc[0, 'AVG(ca_op_time)'] == 9:\r\n #if br_data_df.loc[0, 'AVG(tr_op_time)'] == 3.5: # if these 2 conditions are met, with high prob we are in first ever auction.\r\n #fab_started_at = datetime.datetime.now()\r\n #auction_total_time = 2 # auction estimated total time = 2 min\r\n # slot_1_start = datetime.datetime.now() + datetime.timedelta(minutes=int(auction_total_time)) - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) - leeway # time when on going fab started + mean ca processing time - mean tr operation time - margin.\r\n # slot_1_end = slot_1_start + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) # time when on going fab started + mean tr operation time\r\n # slot_2_start = ca_estimated_end + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) - leeway # time when on going fab started + mean ca processing time - margin\r\n # slot_2_end = slot_2_start + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) # time when on going fab started + mean ca processing time + mean tr operation time\r\n ca_estimated_end = fab_started_at + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) # time when on going fab started + mean ca processing time.\r\n if br_data_df.loc[0, 'AVG(ca_op_time)'] == 9:\r\n if br_data_df.loc[0, 'AVG(tr_op_time)'] == 3.5:\r\n slot_1_start = ca_estimated_end - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) - (leeway / 2)\r\n slot_1_end = ca_estimated_end + (leeway / 2)\r\n\r\n slot_1_start = ca_estimated_end - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) - (leeway / 2)\r\n slot_1_end = ca_estimated_end + (leeway / 2)\r\n slot_2_start = ca_estimated_end + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)']/2)) - (leeway / 2)\r\n slot_2_end = ca_estimated_end + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)']/2)) + (leeway / 2) # time when on going fab started + mean ca processing time + mean tr operation time\r\n ca_to_tr_df = pd.DataFrame([], columns=['id', 'agent_type', 'location_1', 'location_2', 'location', 'purpose', 'request_type', 'action', 'time', 'slot_1_start', 'slot_1_end', 'slot_2_start', 'slot_2_end', 'slot'])\r\n ca_to_tr_df.at[0, 'id'] = agent_df.loc[0, 'id']\r\n ca_to_tr_df.at[0, 'agent_type'] = agent_df.loc[0, 'agent_type']\r\n ca_to_tr_df.at[0, 'location_1'] = agent_df.loc[0, 'location_1']\r\n ca_to_tr_df.at[0, 'location_2'] = agent_df.loc[0, 'location_2']\r\n ca_to_tr_df.at[0, 'location'] = agent_df.loc[0, 'location']\r\n ca_to_tr_df.at[0, 'purpose'] = \"request\"\r\n ca_to_tr_df.at[0, 'slot_1_start'] = slot_1_start\r\n ca_to_tr_df.at[0, 'slot_1_end'] = slot_1_end\r\n ca_to_tr_df.at[0, 'slot_2_start'] = slot_2_start\r\n ca_to_tr_df.at[0, 'slot_2_end'] = slot_2_end\r\n this_time = datetime.datetime.now()\r\n ca_to_tr_df.at[0, 'time'] = this_time\r\n ca_to_tr_df.at[0, 'request_type'] = \"request\"\r\n ca_to_tr_df.at[0, 'action'] = \"pre-book\"\r\n return ca_to_tr_df",
"def plot_stress_time(F_tot, response_t, coords, t_range):\n section = np.where((t>t_range[0]) & (t<t_range[1]))[0]\n fig, ax1 = plt.subplots(figsize=[15,5])\n ax2 = ax1.twinx()\n# ax1.set_title('Load and response at '+str(coords),fontsize = 14)\n ax1.set_xlim(t_range)\n ax1.set_xlabel('t [s]')\n resp = ax1.plot(t[section], response_t[section]/10**6, color=\"#00A6D6\",\n label='Equivalent gate stress')\n ax1.set_ylabel('Stress [MPa]', fontsize=12)\n d_max = 1.2*max(response_t[section])/10**6\n d_mean = np.mean(response_t[section])/10**6\n ax1.set_ylim(d_mean-d_max,d_max)\n ax1.legend()\n\n force = ax2.plot(t[section], F_tot[section]/1000, color=\"#c3312f\", label = 'Wave force')\n ax2.set_ylabel('Integrated wave force [$kN/m$]', fontsize = 12)\n F_lim = 1.2*max(F_tot[section])/1000\n F_mean = np.mean(F_tot[section]/1000)\n ax2.set_ylim(F_mean-F_lim,F_lim)\n\n lines = resp + force\n labs = [l.get_label() for l in lines]\n ax1.grid(lw=0.25)\n ax1.legend(lines,labs, fontsize = 12)\n return fig",
"def compare_displacements(ds1,ds2):\n # Obteniendo los datos para BP\n t1 = ds1['t']\n t1 = t1[:n_im-1]\n t1 = mplt.dates.date2num(t1)\n d1 = ds1['d_t']\n # Obteniendo los datos para RMA\n t2 = ds2['t']\n t2 = t2[:n_im-1]\n t2 = mplt.dates.date2num(t2)\n d2 = ds2['d_t']\n\n # Graficando las 2 curvas juntas\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d1)):\n # Hallando el valor promedio final x zona\n mean_bp = d1[i].mean()\n mean_rma = d2[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"Valor promedio RMA_zona\"+str(i)+\": \",mean_rma)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)\n\n fig, ax= plt.subplots(figsize=(10,7))\n ax.plot_date(t1,d1[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.plot_date(t2,d2[i],'r',marker='',markerfacecolor='r',markeredgecolor='r',label='RMA')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000*4/(4*fc),c*1000*4/(4*fc)])\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n\n return 'Ok'",
"def task_1():\n\n # To store the list of speeds to plot\n list_of_speeds = []\n list_of_times = []\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1):\n list_of_speeds.append(i)\n time = (DISTANCE/i) * 60 * 60\n list_of_times.append(time)\n\n plt.plot(list_of_speeds, list_of_times)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time (in s)\")\n plt.show()",
"def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()",
"def plot_comparison_GHR(data, data1):\n # Loads the different datasets\n runs = data[data.columns[0]]\n distance = data[data.columns[1]]\n\n runs1 = data1[data1.columns[0]]\n distance1 = data1[data1.columns[1]]\n\n # Forms the histogram\n plt.plot(runs, distance, label=\"Simulated Annealing\")\n plt.plot(runs1, distance1, color = 'orange', label=\"Hillclimber\")",
"def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()",
"def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)",
"def plot_pupil_diameter_hist(pupil_diameter, cam_times, trials_df, cam='left'):\r\n for align_to, color in zip(['stimOn_times', 'feedback_times'], ['red', 'purple']):\r\n start_window, end_window = plt_window(trials_df[align_to])\r\n start_idx = insert_idx(cam_times, start_window)\r\n end_idx = np.array(start_idx + int(WINDOW_LEN * SAMPLING[cam]), dtype='int64')\r\n # Per trial norm\r\n pupil_all = [zscore(list(pupil_diameter[start_idx[i]:end_idx[i]])) for i in range(len(start_idx))]\r\n pupil_all_norm = [trial - trial[0] for trial in pupil_all]\r\n\r\n pupil_mean = np.nanmean(pupil_all_norm, axis=0)\r\n pupil_std = np.nanstd(pupil_all_norm, axis=0) / np.sqrt(len(pupil_all_norm))\r\n times = np.arange(len(pupil_all_norm[0])) / SAMPLING[cam] + WINDOW_LAG\r\n\r\n plt.plot(times, pupil_mean, label=align_to.split(\"_\")[0], color=color)\r\n plt.fill_between(times, pupil_mean + pupil_std, pupil_mean - pupil_std, color=color, alpha=0.5)\r\n plt.axvline(x=0, linestyle='--', c='k')\r\n plt.title(f'Pupil diameter trial avg\\n({cam.upper()} cam)')\r\n plt.xlabel('time [sec]')\r\n plt.xticks([-0.5, 0, 0.5, 1, 1.5])\r\n plt.ylabel('z-scored smoothed pupil diameter [px]')\r\n plt.legend(loc='lower right', title='aligned to')",
"def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time",
"def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()",
"def plot_speed_hist(dlc_df, cam_times, trials_df, feature='paw_r', cam='left', legend=True):\r\n # Threshold the dlc traces\r\n dlc_df = likelihood_threshold(dlc_df)\r\n # For pre-GPIO sessions, remove the first few timestamps to match the number of frames\r\n cam_times = cam_times[-len(dlc_df):]\r\n if len(cam_times) != len(dlc_df):\r\n raise ValueError(\"Camera times length and DLC length are inconsistent\")\r\n # Get speeds\r\n speeds = get_speed(dlc_df, cam_times, camera=cam, feature=feature)\r\n # Windows aligned to align_to\r\n start_window, end_window = plt_window(trials_df['stimOn_times'])\r\n start_idx = insert_idx(cam_times, start_window)\r\n end_idx = np.array(start_idx + int(WINDOW_LEN * SAMPLING[cam]), dtype='int64')\r\n # Add speeds to trials_df\r\n trials_df[f'speed_{feature}'] = [speeds[start_idx[i]:end_idx[i]] for i in range(len(start_idx))]\r\n # Plot\r\n times = np.arange(len(trials_df[f'speed_{feature}'].iloc[0])) / SAMPLING[cam] + WINDOW_LAG\r\n # Need to expand the series of lists into a dataframe first, for the nan skipping to work\r\n correct = trials_df[trials_df['feedbackType'] == 1][f'speed_{feature}']\r\n incorrect = trials_df[trials_df['feedbackType'] == -1][f'speed_{feature}']\r\n plt.plot(times, pd.DataFrame.from_dict(dict(zip(correct.index, correct.values))).mean(axis=1),\r\n c='k', label='correct trial')\r\n plt.plot(times, pd.DataFrame.from_dict(dict(zip(incorrect.index, incorrect.values))).mean(axis=1),\r\n c='gray', label='incorrect trial')\r\n plt.axvline(x=0, label='stimOn', linestyle='--', c='r')\r\n plt.title(f'{feature.capitalize()} speed trial avg\\n({cam.upper()} cam)')\r\n plt.xticks([-0.5, 0, 0.5, 1, 1.5])\r\n plt.xlabel('time [sec]')\r\n plt.ylabel('speed [px/sec]')\r\n if legend:\r\n plt.legend()\r\n\r\n return plt.gca()",
"def plot_race(url):\n #hey, thanks again for these functions!\n idrace = id_from_url(url)\n xml = get_poll_lxml(idrace) \n colors = plot_colors(xml)\n\n if len(colors) == 0:\n return\n \n #really, you shouldn't have\n result = race_result(url)\n \n poll_plot(idrace)\n plt.xlabel(\"Date\")\n plt.ylabel(\"Polling Percentage\")\n for r in result:\n plt.axhline(result[r], color=colors[_strip(r)], alpha=0.6, ls='--')",
"def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n 
plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")",
"def plot_muscle_activations(side,t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n\n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n muscle_rh_activations = muscle_rh_activations[index_start:index_end+1,:]\n muscle_lh_activations = muscle_lh_activations[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n if side =='right':\n muscle_activations = muscle_rh_activations\n elif side == 'left':\n muscle_activations = muscle_lh_activations \n else:\n return\n \n plt.figure('Muscle activations')\n plt.subplot(421)\n plt.plot(time,muscle_activations[:,0])\n plt.title('Muscle PMA')\n #plt.xlabel('Time [s]')\n plt.ylabel('Muscle activation')\n\n plt.subplot(422)\n plt.plot(time,muscle_activations[:,1])\n plt.title('Muscle CF')\n #plt.xlabel('Time [s]')\n #plt.ylabel('Muscle activation') \n\n plt.subplot(423) \n plt.plot(time,muscle_activations[:,2])\n plt.title('Muscle SM')\n #plt.xlabel('Time [s]')\n plt.ylabel('Muscle activation')\n \n plt.subplot(424) \n plt.plot(time,muscle_activations[:,3])\n plt.title('Muscle POP')\n #plt.xlabel('Time [s]')\n #plt.ylabel('Muscle activation') \n \n plt.subplot(425) \n plt.plot(time,muscle_activations[:,4])\n plt.title('Muscle RF')\n# plt.xlabel('Time [s]')\n plt.ylabel('Muscle activation') \n\n plt.subplot(426) \n plt.plot(time,muscle_activations[:,5])\n plt.title('Muscle TA')\n# plt.xlabel('Time [s]')\n #plt.ylabel('Muscle activation') \n \n plt.subplot(427) \n plt.plot(time,muscle_activations[:,6])\n plt.title('Muscle SOL')\n plt.xlabel('Time [s]')\n plt.ylabel('Muscle activation') \n \n plt.subplot(428) \n plt.plot(time,muscle_activations[:,7])\n plt.title('Muscle LG')\n plt.xlabel('Time [s]')\n \n plt.subplots_adjust(hspace=0.5)\n #plt.ylabel('Muscle activation') \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n \n# plt.suptitle('Muscle activations of the '+ side + ' limb')\n plt.show()\n return",
"def update_hospitals_lists(\n hospital_times_1, hospital_times_2, times_1, times_2, measurement_type\n):\n if measurement_type == \"w\":\n hospital_times_1.append(\n np.nanmean([np.nanmean(w.waiting_times) for w in times_1])\n )\n hospital_times_2.append(\n np.nanmean([np.nanmean(w.waiting_times) for w in times_2])\n )\n else:\n hospital_times_1.append(\n np.nanmean([np.nanmean(b.blocking_times) for b in times_1])\n )\n hospital_times_2.append(\n np.nanmean([np.nanmean(b.blocking_times) for b in times_2])\n )\n return hospital_times_1, hospital_times_2",
"def ratio(gb_data, data_depcode, data_ratio_hospitalises,current_date, data_hospitalises, current_date_file, min_value_80p , nbhospitalises_80p) :\n start = time.time()\n fig, ax = plt.subplots(figsize=(12, 8))\n\n plt.title(f\"Ratio of in-hospital deaths to hospitalizations : {current_date}\", fontsize=20)\n plt.ylabel(\"Total number of deceases / Total number of hospitalized\")\n plt.xlabel(\"Total number of hospitalized\")\n\n for i, txt in enumerate(data_depcode):\n if (data_hospitalises[i] > data_hospitalises.max() * 0.20):\n ax.annotate(txt, (data_hospitalises[i], data_ratio_hospitalises[i]), xytext=(data_hospitalises[i] + 20, data_ratio_hospitalises[i])) \n\n plt.axhline(data_ratio_hospitalises.mean(), color='green', linestyle='--', label=f'average death ratio ({data_ratio_hospitalises.mean():.2f}%)')\n\n plt.axvline(min_value_80p, color='pink', linestyle='-', label=f\"80% of the number of hospitalized people in France are on the right side of the line ({nbhospitalises_80p:.0f} hospitalized)\")\n\n ax.scatter(data_hospitalises, data_ratio_hospitalises)\n\n ax.annotate('updated chart',xy=(1, 0), xytext=(-15, 10), fontsize=15,\n xycoords='axes fraction', textcoords = 'offset points',\n bbox=dict(facecolor = 'white', alpha = 0.9),\n horizontalalignment = 'right', verticalalignment = 'bottom')\n\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))\n plt.legend()\n\n current_date_file = gb_data['date'].max().strftime('%Y%m%d')\n end = time.time()\n print(\"Time spent on ratio plot: {0:.5f} s.\".format(end - start)) \n plt.show()",
"def plot_bus_load(self):\n stops = {key: 0 for key, _ in self.route.timetable().items()}\n for passenger in self.passengers:\n trip = self.passenger_trip(passenger)\n stops[trip[0][1]] += 1\n stops[trip[1][1]] -= 1\n prev = None\n for i, stop in enumerate(stops):\n if i > 0:\n stops[stop] += stops[prev]\n prev = stop\n fig, ax = plt.subplots()\n ax.step(range(len(stops)), list(stops.values()), where=\"post\")\n ax.set_xticks(range(len(stops)))\n ax.set_xticklabels(list(stops.keys()))\n return fig, ax",
"def waiting_times(all_data):\n print('Computing waiting times')\n result = {'p': [], 'alpha': [], 'durations': []}\n for data in all_data:\n N = data['config']['N']\n p = data['config']['p']\n alpha = data['config']['alpha']\n print(f'p = {p}, alpha = {alpha}')\n\n # find dominant strategy at each point in time\n print(' > Finding dominant strategies')\n dom_strats = np.asarray(list(map(lambda e: get_dominant_strategy(e), data['snapshots'])))\n print(f' >> Found {np.unique(dom_strats).size} unique strategies')\n\n if np.unique(dom_strats).size <= 1:\n print(' >> Skipping')\n continue\n\n # detect dominant strategy changes (and durations)\n print(' > Computing durations')\n durations = get_domain_durations(dom_strats)\n durations /= N**2\n print(f' >> Found {durations.size} durations')\n\n # store result\n result['p'].extend([p]*len(durations))\n result['alpha'].extend([alpha]*len(durations))\n result['durations'].extend(durations)\n\n df = pd.DataFrame(result)\n\n # plot w-time distributions\n print(' > Plotting')\n for p in df['p'].unique():\n sub = df[df['p']==p]\n\n plt.figure()\n for alpha, group in sub.groupby(['alpha']):\n sns.distplot(\n group['durations'],\n kde=False, label=rf'$\\alpha={alpha}$')\n\n plt.title(rf'Distribution of waiting times ($p={p}$)')\n plt.xlabel(r'$\\Delta t$')\n plt.ylabel(r'count')\n plt.legend(loc='best')\n\n plt.savefig(f'images/waiting_times_p{p}.pdf')\n\n ## plot wtd dependence on parameters\n plt.figure()\n sns.boxplot(x='alpha', y='durations', hue='p', data=df)\n plt.savefig('images/waiting_times_vs_alpha.pdf')\n plt.close()\n\n return df",
"def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def graphformation(time_lower, time_upper):\n\tprm = param.Para()\n\ttry:\n\t\tdb_connection = mysql.connector.connect(\n\t\t host=prm.hostname,\n\t\t user=prm.username,\n\t\t passwd=prm.password,\n\t\t database= prm.dbname\n\t\t )\n\t\tdb_cursor = db_connection.cursor()\n\texcept:\n\t\tprint(\"Can't Connect to database, check credentials in parameter file\")\n\tquery = (\"SELECT * FROM identity \")\n\tdb_cursor.execute(query)\n\tdf1=pd.DataFrame(db_cursor.fetchall())\n\tdf1.columns= ['node','deviceid','student','rollno']\n\tdict_identity = dict(zip(df1.deviceid, df1.node))\n\trev_dict_identity = dict(zip(df1.node, df1.deviceid ))\n\tquery = (\"SELECT * FROM activity WHERE time BETWEEN '{}' AND '{}'\".format(time_lower,time_upper)) ## incomplete\n\tdb_cursor.execute(query)\n\tactivity_data = pd.DataFrame(db_cursor.fetchall())\n\tif activity_data.empty==False:\n\t\tactivity_data.columns=[\"sl_no\",\"time\",\"node\",\"latitude\",\"longitude\"]\n\telse:\n\t\tprint(\"No Activity in the selected Time Window\")\n\t\treturn\n\tnumnodes= len(df1)\n\tedges= []\n\tscore = {}\n\t#print(activity_data)\n\ttime_groups = activity_data.groupby('time')\n\twith open(r'C:\\Users\\HP\\Desktop\\project\\Contact_Graph\\bluetooth.txt') as json_file:\n\t\tdata1 = json.load(json_file)\n\tfor name, group in time_groups:\n\t\tscore_tmp = decayfunc(name,time_upper)\n\t\tgroup = group.sort_values('node')\n\t\tfor i in range(len(group)-1):\n\t\t\tnode1 = group.iloc[i,2]\n\t\t\t###########################\n\t\t\tlistnearby=[]\n\t\t\ttry:\n\t\t\t\tlistnearby = data1[rev_dict_identity[node1]][str(name)]\n\t\t\t\tlistnearby = [dict_identity[i] for i in listnearby if dict_identity[i]>node1]\n\t\t\t\tfor i in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(node1,i)]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(node1,i)]=1\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t###########################\n\t\t\tfor j in range(i+1,len(group)):\n\t\t\t\tnode2 =group.iloc[j,2]\n\t\t\t\tif proximityfunc(group.iloc[i,3],group.iloc[i,4],group.iloc[j,3],group.iloc[j,4]) and node2 not in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]=1\n\tnode_list = list(df1.node)\n\ttitle_list = list(df1.deviceid)\n\tedges_list = []\n\tfor edge,val in score.items():\n\t\tedges_list.append((int(edge[0]),int(edge[1]),float(val)))\n\n\treturn edges_list,node_list,title_list",
"def updatefig(*args):\n p1.set_array(turn(grid))\n p2.set_data(tally['time'], tally['sickos'])\n p3.set_data(tally['time'], tally['immune'])\n p4.set_data(tally['time'], tally['dead'])\n ax2.set_xlim(0, max(tally['time']))\n # ax2.set_ylim(0, max(max(sickos), max(immune)))\n # End sim if the disease is gone\n if tally['sickos'][-1] == 0:\n ani.event_source.stop()\n end_time = time.process_time()\n show_summary()\n print(\"Process time:\", end_time - start_time)\n return p1, p2, p3, p4,",
"def plot_demand_timeseries(\n a: pd.DataFrame,\n b: pd.DataFrame = None,\n window: int = 168,\n title: str = None,\n path: str = None,\n) -> None:\n plt.figure(figsize=(16, 8))\n # Plot predicted\n for field, color in [(\"demand_mwh\", \"grey\"), (\"scaled_demand_mwh\", \"orange\")]:\n if field not in a:\n continue\n y = a[field]\n if window:\n y = y.rolling(window).mean()\n plt.plot(\n a[\"utc_datetime\"], y, color=color, alpha=0.5, label=f\"Predicted ({field})\"\n )\n # Plot expected\n if b is not None:\n y = b[\"demand_mwh\"]\n if window:\n y = y.rolling(window).mean()\n plt.plot(\n b[\"utc_datetime\"], y, color=\"red\", alpha=0.5, label=\"Reference (demand_mwh)\"\n )\n if title:\n plt.title(title)\n plt.ylabel(\"Demand (MWh)\")\n plt.legend()\n if path:\n plt.savefig(path, bbox_inches=\"tight\")\n plt.close()",
"def H_perform_plot(performance, hurricane):\n fig = plt.figure(figsize = (15, 10))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricane[i]\n plt.plot(np.arange(0, len(temp1), 1), temp1, color = temp2.c, label = temp2.name)\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(temp1), 30))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)",
"def update_lineplots(self, diff_sec):\n current_goals = []\n for ig, line, ax, goal, metric in zip(range(len(self.data.goals)), self.completed_lines, self.axes[:,0],\n self.data.goals, self.data.metrics_history):\n if line is not None:\n line.remove()\n ax.collections.clear()\n self.completed_lines[ig], = ax.plot(self.data.work_time_hours, metric, color=(64./255,173./255,233./255),\n linewidth=2)\n\n m = goal/(self.stop_hour_val - self.start_hour_val)\n new_inds = (diff_sec+1) if diff_sec > 0 else 2\n current_goal = m * (self.data.work_time_hours[-new_inds:]-self.start_hour_val)\n if np.any(current_goal > goal):\n current_goal = np.ones_like(current_goal)*goal\n current_goals.append(current_goal)\n # self.data.goal_hours[ig] = np.append(self.data.goal_hours[ig, :-1], current_goal)\n\n goal_hours = np.append(self.data.goal_hours[ig, :-1], current_goal)\n # ax.plot(self.data.work_time_hours, self.data.goal_hours[ig], color='orange', marker='o')\n\n ax.fill_between(self.data.work_time_hours, metric, goal_hours,\n where=goal_hours >= metric, facecolor='orangered',\n interpolate=True)\n\n ax.fill_between(self.data.work_time_hours, metric, goal_hours,\n where=goal_hours <= metric, facecolor='lime',\n interpolate=True)\n\n self.canvas.draw()\n\n\n self.data.goal_hours = np.append(self.data.goal_hours[:, :-1], current_goals, axis=1)",
"def get_two_hospital_plot_labels(measurement_type):\n if measurement_type == \"w\":\n title = \"Waiting times of two hospitals over different distribution of patients\"\n y_axis_label = \"Waiting Time\"\n else:\n title = (\n \"Blocking times of two hospitals over different distribution of patients\"\n )\n y_axis_label = \"Blocking Time\"\n x_axis_label = \"Hospital 1 arrival proportion\"\n return (x_axis_label, y_axis_label, title)"
]
| [
"0.61512375",
"0.5727072",
"0.55952674",
"0.54958403",
"0.54581094",
"0.5438808",
"0.54312366",
"0.5414995",
"0.53700393",
"0.5358794",
"0.5342512",
"0.5331552",
"0.5290692",
"0.52683496",
"0.524381",
"0.5241239",
"0.522444",
"0.5208598",
"0.5199821",
"0.51853096",
"0.51832557",
"0.5177997",
"0.5171982",
"0.51700664",
"0.51699215",
"0.5166922",
"0.51642424",
"0.5160463",
"0.51363385",
"0.51275367"
]
| 0.6441067 | 0 |
Navigate and extract data about avalanche status | def navigate_and_extract_avalanche_data(self):
self.browser.get(self.url)
avalanche_status = {}
try:
avalanche_level = self.browser.find_element_by_xpath(
'//*[@id="law-master"]/div[1]/div[1]/span/span')
avalanche_status['avalanche_level'] = avalanche_level.text
avalanche_warning_published = (
self.browser.find_element_by_class_name('law-mst-iat'))
avalanche_status['avalanche_warning_published'] = (
avalanche_warning_published.text)
avalanche_warning_valid_until = (
self.browser.find_element_by_class_name('law-mst-exp'))
avalanche_status['avalanche_warning_valid_until'] = (
avalanche_warning_valid_until.text)
avalanche_description = (
self.browser.find_element_by_class_name("law-mst-dsc"))
avalanche_status['avalanche_description'] = (
avalanche_description.text.replace('\n', ' '))
except NoSuchElementException as error:
            logging.info(f"""An error occurred while scraping the website {self.url}:
                {error}""")
return avalanche_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_avalanche_status():\n avalanche = AvalancheWarningScraper(\"http://lawiny.topr.pl/\")\n avalanche_status = avalanche.navigate_and_extract_avalanche_data()\n return avalanche_status",
"def status(self):",
"def getStatus():",
"def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()",
"def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()",
"def _read_status(self):\n results = self.status_table.query_items({'api_version': self.api_version})\n if not results:\n return None\n else:\n return results[0]",
"def show_status():\n\n pass",
"def get_status() -> None:\n assert scraper.get_status() == True",
"def status() -> Dict[str, Any]:",
"def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Something'} is being tracked.\")\n\n return status",
"def extract_status(self, status) -> None:\r\n if \"VehicleInfo\" in status:\r\n if \"RemoteHvacInfo\" in status[\"VehicleInfo\"]:\r\n self.hvac = status[\"VehicleInfo\"][\"RemoteHvacInfo\"]\r\n\r\n if \"ChargeInfo\" in status[\"VehicleInfo\"]:\r\n self.battery = status[\"VehicleInfo\"][\"ChargeInfo\"]",
"async def statusinfo(self, astable):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\", \"all\", str(astable)])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))",
"def status(self):\n return self._get(path='status')",
"def status():\n _request('worklog/status/')",
"def comando_status(self):\r\n\tif args.tipo == 'web':\r\n return self.status_web()\r\n\r\n\tif args.tipo == 'nfce':\r\n return self.consulta_status_nfce()\r\n\r\n\tif args.tipo == 'dual':\r\n return self.status_impressora_dual()",
"def status(self):\n self.scion_sh('status')",
"def test_get_status(self):\n pass",
"def test_get_status(self):\n pass",
"def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")",
"def detail(self):\n return self.status[\"health\"][\"detail\"]",
"def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def remote_status():",
"def status(self):\n logging.debug(\"%s entered status\" % self)\n # print_config(self.infra)\n # print self.images\n # headers = [\"Machine Name\", \"Flavor\", \"IP Addresses\", \"Image Name\", \"Status\"]\n # pt = prettytable.PrettyTable(headers)\n # pt.align[\"Machine Name\"]=\"l\"\n # pt.align[\"IP Addresses\"] = \"l\"\n # pt.align[\"Image Name\"] = \"l\"\n # pt.align[\"Status\"] = \"r\"\n \n print \"Checking status of %s\" % self.footprint_name\n # tmpl = \"%(machine_name)-20s%(flavor)5s%(status)-30s\"\n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-60s\\n\"\"\"\n print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n print 80 * \"-\"\n \n for machine in self.machines.keys():\n m = self.machines[machine]\n # machine_name = m.machine_name\n # ips = str(m.ip_addresses)\n # flavor = str(m.flavor)\n # img = str(m.image_id)\n # status = str(m.status)\n # pt.add_row([m, ips, status, img, status])\n # print \"FFF\", m, ips, flavor, img, status\n # print tmpl % locals()\n print m.status\n \n return \"%s is currently: %s\" % (self.footprint_name, self.footprint_status)",
"def _print_status(self):",
"def getInfoOnStatus(self):\n raise NotImplementedError();",
"def print_status(data):\n print \"0x%x:0x%x:0x%x:0x%x:0x%x:%d:%d:%d:%d:%d:%d:%d:%d\" % (\n data['sysVer'], data['barVer'], data['uvVer'], data['rccVer'],\n data['windVer'], data['batteryRain'], data['batteryUV'],\n data['batteryWind'], data['battery5'], data['battery4'],\n data['battery3'], data['battery2'], data['battery1'])",
"def GetStatus(self):\r\n return self.status",
"def status(self, id):",
"async def get_status():",
"def printStatus(self, status):\n \"\"\" This probably breaks SOI \"\"\"\n\n if hasattr(self, 'show_state') and self.show_state:\n print(\"=============== \" + str(status[\"count\"]) + \" ===============\")\n print(\"Current best energy: \" + str(status[\"bestEnergy\"]) + \" from state: \" + str(status[\"bestState\"]))\n print(\"last accepted energy: \" + str(status[\"energy\"]) + \" from state: \" + str(status[\"state\"]))\n print(\"current temperature: \" + str(status[\"temperature\"]))\n else:\n print(\"=============== \" + str(status[\"count\"]) + \" ===============\")\n print(\"Current best energy: \" + str(status[\"bestEnergy\"]) )\n print(\"last accepted energy: \" + str(status[\"energy\"]) )\n print(\"current temperature: \" + str(status[\"temperature\"]))"
]
| [
"0.7122475",
"0.6711828",
"0.66904646",
"0.64496374",
"0.63534707",
"0.6331564",
"0.6314084",
"0.6237637",
"0.6213105",
"0.61849993",
"0.61831856",
"0.61782026",
"0.6148291",
"0.6140133",
"0.61341983",
"0.61112785",
"0.60923004",
"0.60923004",
"0.60893565",
"0.6074108",
"0.6073303",
"0.60730225",
"0.6064412",
"0.6062322",
"0.60470885",
"0.60324246",
"0.6027398",
"0.60127574",
"0.60105044",
"0.6003017"
]
| 0.7464375 | 0 |
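A minimal sketch of the locate-and-collect pattern used in the scraper record above, written against the Selenium 4 locator API. The URL and the CSS class names are carried over from the record as-is; the choice of Firefox/geckodriver and the skip-on-missing-element behaviour are assumptions, not a verified setup.

import logging

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException


def scrape_avalanche_fields(url="http://lawiny.topr.pl/"):
    """Collect a few status fields into a dict, skipping any element that is missing."""
    fields = {
        "avalanche_warning_published": "law-mst-iat",
        "avalanche_warning_valid_until": "law-mst-exp",
        "avalanche_description": "law-mst-dsc",
    }
    driver = webdriver.Firefox()
    status = {}
    try:
        driver.get(url)
        for key, class_name in fields.items():
            try:
                element = driver.find_element(By.CLASS_NAME, class_name)
                status[key] = element.text.replace("\n", " ")
            except NoSuchElementException as error:
                logging.info("Element %s not found on %s: %s", class_name, url, error)
    finally:
        # Always release the browser, even when page load or a lookup fails.
        driver.quit()
    return status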
Function for getting the current avalanche status in the Tatra Mountains | def get_avalanche_status():
avalanche = AvalancheWarningScraper("http://lawiny.topr.pl/")
avalanche_status = avalanche.navigate_and_extract_avalanche_data()
return avalanche_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getStatus():",
"def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()",
"def _do_get_status(self):\n logging.info(__name__ + ' : Get status of the device.')\n result = self._execute('X')\n usage = {\n 0: \"Channel not in use\",\n 1: \"Channel used for Nitrogen level\",\n 2: \"Channel used for Helium Level (Normal pulsed operation)\",\n 3: \"Channel used for Helium Level (Continuous measurement)\",\n 9: \"Error on channel (Usually means probe unplugged)\"\n }\n # current_flowing = {\n # 0 : \"Curent not flowing in Helium Probe Wire\",\n # 1 : \"Curent not flowing in Helium Probe Wire\"\n # }\n # auto_fill_status = {\n # 00 : \"End Fill (Level > FULL)\",\n # 01 : \"Not Filling (Level < FULL, Level > FILL)\",\n # 10 : \"Filling (Level < FULL, Level > FILL)\",\n # 11 : \"Start Filling (Level < FILL)\"\n # }\n return usage.get(int(result[1]), \"Unknown\")",
"def _getCurrentComponentStatus(self):\n resOverall = self.sysAdminClient.getOverallStatus()\n if not resOverall['OK']:\n return resOverall\n currentStatus = {'Down': set(), 'Run': set(), 'All': set()}\n informationDict = resOverall['Value']\n for systemsDict in informationDict.values():\n for system, instancesDict in systemsDict.items():\n for instanceName, instanceInfoDict in instancesDict.items():\n identifier = '%s__%s' % (system, instanceName)\n runitStatus = instanceInfoDict.get('RunitStatus')\n if runitStatus in ('Run', 'Down'):\n currentStatus[runitStatus].add(identifier)\n\n currentStatus['All'] = currentStatus['Run'] | currentStatus['Down']\n return S_OK(currentStatus)",
"def get_status(self):\n return self.read_register(259, 0, 3)",
"def GetStatus(self):\r\n return self.status",
"def status(self, station=1):\n return self.statuslist()[station][2]",
"def status(self):\n return self._get(path='status')",
"def fusion_api_get_appliance_status(self, api=None, headers=None):\n return self.info.get_status(api=api, headers=headers)",
"def _read_status(self):\n results = self.status_table.query_items({'api_version': self.api_version})\n if not results:\n return None\n else:\n return results[0]",
"async def get_status():",
"def remote_status():",
"def status(self):\n return self._call_txtrader_api('status', {})",
"def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()",
"def get_apriori_antenna_status_enum():\n apa = AprioriAntenna()\n return apa.status_enum()",
"def status(self):\n self.scion_sh('status')",
"def comando_status(self):\r\n\tif args.tipo == 'web':\r\n return self.status_web()\r\n\r\n\tif args.tipo == 'nfce':\r\n return self.consulta_status_nfce()\r\n\r\n\tif args.tipo == 'dual':\r\n return self.status_impressora_dual()",
"def amtool_status(self, mess, args):\n self.log.info(\"Current config {0}\".format(self.config))\n self.log.info(\n \"Alertmanager @ {0}\".format(self.config['server_address']))\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_status()\n return result",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")"
]
| [
"0.73289245",
"0.6945706",
"0.6903834",
"0.6889021",
"0.68693817",
"0.68224365",
"0.68129367",
"0.67844456",
"0.6728177",
"0.67269623",
"0.6716174",
"0.6674203",
"0.6642291",
"0.66047305",
"0.65939426",
"0.657994",
"0.6577985",
"0.65665865",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385",
"0.6546385"
]
| 0.731255 | 1 |
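A short usage sketch for the wrapper in the record above, assuming get_avalanche_status() and the dictionary keys produced by the scraper in the previous record; neither is a verified public API.

def print_avalanche_report():
    # get_avalanche_status() may return an empty dict when scraping fails,
    # so every key is read defensively with a fallback value.
    status = get_avalanche_status()
    if not status:
        print("No avalanche data could be scraped.")
        return
    print(f"Level: {status.get('avalanche_level', 'unknown')}")
    print(f"Published: {status.get('avalanche_warning_published', 'unknown')}")
    print(f"Valid until: {status.get('avalanche_warning_valid_until', 'unknown')}")
    print(f"Details: {status.get('avalanche_description', '')}")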
Return disk usage statistics about the given path. | def disk_usage(path):
st = os.statvfs(path)
free = (st.f_bavail * st.f_frsize)/ 1024
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return DiskUsage(total, used, free) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disk_usage(path):\n fs.disk_usage(path)",
"def get_diskusage(path):\n st = os.statvfs(path)\n free = st.f_bavail * st.f_frsize\n total = st.f_blocks * st.f_frsize\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return float(used)/total",
"def disk_usage(path):\n if PY3 and isinstance(path, bytes):\n # XXX: do we want to use \"strict\"? Probably yes, in order\n # to fail immediately. After all we are accepting input here...\n path = path.decode(ENCODING, errors=\"strict\")\n total, free = cext.disk_usage(path)\n used = total - free\n percent = usage_percent(used, total, round_=1)\n return _common.sdiskusage(total, used, free, percent)",
"def _disk_usage(path: pathlib.Path):\n if path.is_file():\n return path.stat().st_size\n elif path.is_dir():\n size_bytes = 0\n for file in path.iterdir():\n size_bytes += _disk_usage(file)\n return size_bytes\n else:\n raise NotImplementedError(\"What filetype is {file}?\")",
"def disk_usage(path):\n st = os.statvfs(path)\n total = st.f_blocks * st.f_frsize\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return total, used",
"def disk_usage(path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = ret = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n #return usage_ntuple(total, used, free, round(percent, 1))\n return round(percent,1)",
"def disk_usage(path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = ret = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n return usage_ntuple(convertToGB(total), convertToGB(used), convertToGB(free), round(percent, 1))",
"def get_disk_usage(path):\n # ---------------------------------------------------------------------\n logger.debug(\"get_disk_usage\")\n total_size = 0\n cwd = os.getcwd()\n if os.path.exists(path):\n os.chdir(path)\n cmd = [\"du\", \"--summarize\", \"--block-size=1\"]\n try:\n total_size = subprocess.check_output(cmd)\n total_size = total_size.replace(\"\\t.\\n\", \"\")\n except subprocess.CalledProcessError:\n msg = \"Error executing command = '{0}'\".format(cmd)\n logger.warning(msg)\n os.chdir(cwd)\n return int(total_size)",
"def disk_usage(path):\n total = os.path.getsize(path) # Account for direct usage of directory\n if os.path.isdir(path): # if this is a dir\n for filename in os.listdir(path): # go through the child of the directory\n childpath = os.path.join(path, filename) # Compose full path to child\n total += disk_usage(childpath)\n\n print('{0:<7}'.format(total), path)\n return total",
"def GetDiskUsage(path):\n cmd = [\"du\", \"-b\", \"-k\", \"-s\", path]\n output = common.RunAndCheckOutput(cmd, verbose=False)\n return int(output.split()[0]) * 1024",
"def disk_usage(path):\n total = os.path.getsize(path)\n if os.path.isdir(path):\n for filename in os.listdir(path):\n childpath = os.path.join(path, filename)\n total += disk_usage(childpath)\n return total",
"def get_disk_usage():\n return psutil.disk_usage(os.path.abspath(os.sep))",
"def _get_drive_usage(path):\n if sys.version_info >= (3, 3):\n usage = shutil.disk_usage(path)\n return {\"total\": usage.total, \"used\": usage.used, \"free\": usage.free}\n if on_android():\n from jnius import autoclass\n\n StatFs = autoclass(\"android.os.StatFs\")\n AndroidString = autoclass(\"java.lang.String\")\n stats = StatFs(AndroidString(path))\n return {\n \"total\": stats.getBlockCountLong() * stats.getBlockSizeLong(),\n \"free\": stats.getAvailableBlocksLong() * stats.getBlockSizeLong(),\n }\n # with os.statvfs, we need to multiple block sizes by block counts to get bytes\n stats = os.statvfs(path)\n total = stats.f_frsize * stats.f_blocks\n free = stats.f_frsize * stats.f_bavail\n return {\"total\": total, \"free\": free, \"used\": total - free}",
"def get_ocn_disk_usage(path):\n # ---------------------------------------------------------------------\n logger.debug(\"get_ocn_disk_usage\")\n total_size = 0\n paths = glob.glob(path)\n for path in paths:\n total_size += get_disk_usage(path)\n return int(total_size)",
"def disk_usage(self):\n self.monitoring_object['disk_usage'] =\\\n psutil.disk_usage('/')",
"def disk_usage(pat):\n retval = {}\n dudu = os.popen('du -sk ' + pat)\n for outline in dudu:\n try:\n size, path = outline.split() # newline dumped.\n size = int(size)\n retval[path] = size\n except Exception, e:\n print \"Error (%s) getting size and path from %s.\" % (e, outline)\n return retval",
"def DiskUsage(cls):\n\t\t# >> df -iP\n\t\t# Sys. de fich. Inodes IUtil. ILib. IUti% Monte sur\n\t\t# /dev/sda1 915712 241790 673922 27% /\n\t\t# none 210977 788 210189 1% /dev\n\t\t# none 215028 19 215009 1% /dev/shm\n\t\t# none 215028 71 214957 1% /var/run\n\t\t# none 215028 2 215026 1% /var/lock\n\t\t# /dev/sda5 8364032 500833 7863199 6% /home\n\t\t# /home/sebastien/.Private 8364032 500833 7863199 6% /home/sebastien\n\t\tres = {}\n\t\tfor line in popen(\"df -kP\").split(\"\\n\")[1:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tsystem, inodes, used_inodes, free_inodes, usage, mount = line\n\t\t\ttry:\n\t\t\t\tusage = float(usage[:-1])\n\t\t\texcept ValueError:\n\t\t\t\tusage = 0\n\t\t\tres[mount] = float(usage) / 100.0\n\t\treturn res",
"def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash",
"def disk_used(path):\r\n size = 0\r\n for file in os.listdir(path) + ['.']:\r\n stat = os.stat(os.path.join(path, file))\r\n if hasattr(stat, 'st_blocks'):\r\n size += stat.st_blocks * 512\r\n else:\r\n # on some platform st_blocks is not available (e.g., Windows)\r\n # approximate by rounding to next multiple of 512\r\n size += (stat.st_size // 512 + 1) * 512\r\n # We need to convert to int to avoid having longs on some systems (we\r\n # don't want longs to avoid problems we SQLite)\r\n return int(size / 1024.)",
"def get_disk_usage():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><disk-space></disk-space></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def disk():\n sin = psutil.disk_usage('/')\n return round(sin.percent / 100, 3)",
"def get_disk_size(path='/'):\n if path is None:\n path = '/'\n try:\n st = os.statvfs(path)\n except OSError, e:\n display_error(\n 'Error getting disk space in %s: %s', path, str(e))\n return 0\n total = (st.f_blocks * st.f_frsize) / 1024\n return int(total)",
"def get_mount_usage(paths):\n\n mount_usage = {}\n for mount, stats in get_disk_usage().items():\n for path in paths:\n if (mount == get_mount_point(path)):\n mount_usage[path] = stats\n return mount_usage",
"def disk():\n run(env.disk_usage_command % env)",
"def fs_size(fs_path):\n import shutil\n\n total, used, free = shutil.disk_usage(fs_path)\n return total",
"def media_usage(path: str) -> typing.Dict[str, float]:\n stvf = os.statvfs(path)\n free = stvf.f_bavail * stvf.f_frsize\n total = stvf.f_blocks * stvf.f_frsize\n used = (stvf.f_blocks - stvf.f_bfree) * stvf.f_frsize\n\n return {'free': free, 'total': total, 'used': used}",
"def get_disk_usage():\n\n disk_usage = {}\n diskinfo = subprocess.Popen(['df','-P'], shell=False, stdout=subprocess.PIPE)\n diskinfo.stdout.readline()\n for line in diskinfo.stdout:\n disk_usage[line.split()[5]] = { 'filesystem' : line.split()[0], 'size' : int(line.split()[1]), \\\n'used' : int(line.split()[2]), 'avail' : int(line.split()[3]), 'capacity' : line.split()[4] }\n diskinfo = subprocess.Popen(['df','-i','-P'], shell=False, stdout=subprocess.PIPE)\n diskinfo.stdout.readline()\n for line in diskinfo.stdout:\n disk_usage[line.split()[5]].update( { 'iused' : int(line.split()[2]), 'ifree' : int(line.split()[3]), 'icapacity' : line.split()[4] } )\n return disk_usage",
"def space_usage(\n self, path=None, warning_level=None, previous_result=None,\n can_fail_build=False, name=None, **kwargs):\n path = path or self.m.path['start_dir']\n name = name or 'disk space usage'\n warning_level = warning_level or 0.9\n kwargs.setdefault(\n 'step_test_data',\n lambda: self.m.json.test_api.output_stream(\n self.test_api.space_usage_result()))\n\n if self.m.platform.is_win:\n # Not supported. Feel free to implement.\n return\n\n step = None\n try:\n step = self.m.python(\n name,\n self.resource('statvfs.py'),\n stdout=self.m.json.output(),\n args=[path],\n **kwargs)\n capacity_mb = step.stdout['capacity'] / 1024.0 / 1024.0\n used_mb = step.stdout['used'] / 1024.0 / 1024.0\n percent = used_mb / capacity_mb\n step.presentation.step_text = '%.2f/%.2f GiB (%d%%) used' % (\n used_mb / 1024.0, capacity_mb / 1024.0, percent * 100)\n if percent >= warning_level:\n step.presentation.status = self.m.step.WARNING\n if previous_result:\n step.presentation.step_text += '. Delta: %+.2f MiB' % (\n used_mb - previous_result['used'])\n return {\n 'capacity': capacity_mb,\n 'used': used_mb,\n }\n except Exception as ex:\n # Do not fail entire build because of a disk space step failure.\n if step:\n step.presentation.logs['exception'] = ['%r' % ex]\n step.presentation.status = self.m.step.WARNING\n if can_fail_build:\n raise recipe_api.StepFailure('Could not get disk info: %s' % ex)\n return",
"def get_used_size(path):\n if not os.path.exists(path):\n raise ValueError(\"%s is a non-existent path\" % path)\n f = os.statvfs(path)\n\n unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]\n used = long(unavailBlocks * f[statvfs.F_FRSIZE])\n\n return used",
"def get_disk_stats(self):\n if not self.init:\n print \"There are no initial statistics defined\"\n sys.exit(1)\n\n self.stats = self._load()\n self._deltas()\n return self.stats"
]
| [
"0.8543308",
"0.80476993",
"0.801462",
"0.7993059",
"0.7968749",
"0.79601806",
"0.7927285",
"0.784554",
"0.77313477",
"0.77012545",
"0.7633376",
"0.74749327",
"0.7420647",
"0.7416214",
"0.72969544",
"0.72623026",
"0.7055897",
"0.703122",
"0.693726",
"0.69074285",
"0.6878247",
"0.68183297",
"0.6764096",
"0.67578304",
"0.67456496",
"0.6704138",
"0.6612979",
"0.652428",
"0.64072126",
"0.63983434"
]
| 0.8119913 | 1 |
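A self-contained sketch of the statvfs pattern from the record above, with the DiskUsage tuple spelled out and a portable fallback through shutil.disk_usage; reporting everything in bytes plus a rounded percentage is an assumption, not part of the original record.

import os
import shutil
from collections import namedtuple

DiskUsage = namedtuple("DiskUsage", ["total", "used", "free", "percent"])


def disk_usage_bytes(path):
    """Return total/used/free space in bytes plus a used percentage for the given path."""
    if hasattr(os, "statvfs"):  # POSIX systems
        st = os.statvfs(path)
        total = st.f_blocks * st.f_frsize
        free = st.f_bavail * st.f_frsize
        used = (st.f_blocks - st.f_bfree) * st.f_frsize
    else:  # Windows and anything else without statvfs
        total, used, free = shutil.disk_usage(path)
    percent = round(used / total * 100, 1) if total else 0.0
    return DiskUsage(total, used, free, percent)


# Example: disk_usage_bytes("/").percent -> used share of the root filesystem, e.g. 42.3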
Explain everything available for the given metric. | def explain(self,
disp: bool=True) -> Union[None, str]:
# Find intersecting methods/attributes between MetricTextExplainer and provided metric.
inter = set(dir(self)).intersection(set(dir(self.metric)))
# Ignore private and dunder methods
metric_methods = [getattr(self, c) for c in inter if c.startswith('_') < 1]
# Call methods, join to new lines
s = "\n".join([f() for f in metric_methods if callable(f)])
if disp:
print(s)
else:
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def explainerdashboard_cli(ctx):",
"def fetch_metrics(self):\n\n self.explain_all_indices()",
"def explain(self):",
"def help_explain(self):\n print(EXPLAIN)",
"def describe(self, metric):\n\n return self.fmt_metric.format(\n count=metric.value,\n name=metric.name.replace('_', ' ').capitalize(),\n values=str(self._values)\n )",
"def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()",
"def help_analyze(self):\n print(ANALYZE)",
"def Explain(self, request, global_params=None):\n config = self.GetMethodConfig('Explain')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def describe():",
"def describe(self, metric):\n\n value = None\n\n if len(metric.value) > 0:\n value = metric.value[0].value\n\n return self.fmt_metric.format(\n expected=self._expected_string,\n name=metric.name.replace('_', ' ').capitalize(),\n operator=self._operator_str,\n value=value\n )",
"def __print_metrics_info(self, occurrence_metric):\n print(\" Name: \", self.get_metric_name(occurrence_metric))\n print(\" Type: Metric\")\n print(\" Description:\",\n self.get_metric_description(occurrence_metric))\n print(\" Formula: \", self.get_metric_formula(occurrence_metric))\n return 0",
"def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)",
"def list_metrics(self):\n pass",
"def print_metric(self):\r\n print(f'\\n\\n{self.sort} metric of size {self.n}')\r\n print(f'algorithm: {self.algo}')\r\n print(f'number of comparisons: {self.comps}')\r\n print(f'number of exchanges: {self.exs}')\r\n print(f'regression equation for comparisons: {self.comp_eq}')\r\n print(f'regression equation for exchanges: {self.ex_eq}')\r\n print(f'presorted data: {self.predata}')\r\n print(f'postsorted data: {self.postdata}')",
"def describe(self, *args, **kwargs):\n\t\treturn self.data.describe(*args, **kwargs)",
"def describe(self) -> str:",
"def dataset_statistics(dataset):\n print (dataset.describe())",
"def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__",
"def dataset_statistics(dataset):\n print(dataset.describe())",
"def dataset_statistics(dataset):\n print(dataset.describe())",
"def _describe(self) -> Dict[str, Any]:",
"def _print_summary(data, metric):\n\n print(u'Cortical thickness {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 0].mean(), data[:, 0].std(ddof=1),\n data[:, 0].min(), data[:, 0].max()))\n print('Other modalities {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 1:].mean(), data[:, 1:].std(ddof=1),\n data[:, 1:].min(), data[:, 1:].max()))\n print('Overall {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data.mean(), data.std(ddof=1),\n data.min(), data.max()))",
"async def query(self, metric):\n raise NotImplementedError()",
"def summary(app):\n click.echo(get_summary(app))",
"def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()",
"def describe_qual(df):\n\n categorical = df.dtypes[df.dtypes == \"object\"].index\n df[categorical].describe()",
"def show_metrics(y_true, y_pred, target_names):\n print(\"Hamming Loss: {}\".format(hamming_loss(y_true, y_pred)))\n print(\"Zero One Loss: {}\".format(zero_one_loss(y_true, y_pred)))\n print(\"Hamming Loss Non Zero: {}\\n\".format(hamming_loss_non_zero(y_true, np.array(y_pred))))\n print(classification_report(y_true, y_pred, target_names=target_names))",
"def print_metrics(self):\n output = \"\"\n metrics = self.get_all_metrics()\n for k, v in metrics.items():\n # Print the help line\n output += \"\\n# HELP {name} {help}\\n\".format(name=v['name'],\n help=v['help'])\n # and the type line\n output += \"# TYPE {name} {type}\\n\".format(name=v['name'],\n type=v['type'])\n for sample in v['values']:\n labels = json.loads(sample, object_pairs_hook=OrderedDict)\n if v['type'] == 'histogram' and labels.get('le') == '_sum':\n labels.pop('le', None)\n mname = '{name}_sum'.format(name=v['name'])\n elif v['type'] == 'histogram' and labels.get('le') == '+Inf':\n labels.pop('le', None)\n mname = '{name}_count'.format(name=v['name'])\n elif v['type'] == 'histogram':\n mname = '{name}_bucket'.format(name=v['name'])\n else:\n mname = v['name']\n output += \"{name}{labels} {value}\\n\".format(name=mname,\n labels=self.format_labels(labels),\n value=self.format_value(v['values'][sample]))\n return output",
"def display_metric(metrics_to_print, results, num_refs, args):\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))\n\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))",
"def _explain_model(self):\n raise NotImplementedError"
]
| [
"0.648503",
"0.6382337",
"0.6231688",
"0.6109485",
"0.60351574",
"0.601566",
"0.59919626",
"0.59380907",
"0.58609015",
"0.5851201",
"0.57744217",
"0.5731696",
"0.5674738",
"0.5631782",
"0.56174207",
"0.5617261",
"0.5616795",
"0.5559729",
"0.55428743",
"0.55428743",
"0.55223894",
"0.5488083",
"0.54822916",
"0.54723936",
"0.545609",
"0.5455931",
"0.5431996",
"0.540565",
"0.5394422",
"0.53868455"
]
| 0.6511468 | 0 |
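The explain() method in the row above relies only on name intersection between the explainer and its metric, so the pattern can be sketched stand-alone. The DemoMetric and DemoExplainer classes below are hypothetical stand-ins, not part of the original library; they only show how intersecting dir() results and calling the shared, non-private callables produces the combined explanation text.

class DemoMetric:
    def accuracy(self):
        return 0.9

    def disparate_impact(self):
        return 0.8


class DemoExplainer:
    def __init__(self, metric):
        self.metric = metric

    # Only names shared with the metric (and not starting with "_") are collected.
    def accuracy(self):
        return "Accuracy (fraction of correct predictions): %s" % self.metric.accuracy()

    def disparate_impact(self):
        return "Disparate impact (ratio of favorable-outcome rates): %s" % self.metric.disparate_impact()

    def explain(self, disp=True):
        # Intersect attribute names, drop private/dunder ones, call the rest.
        inter = set(dir(self)).intersection(set(dir(self.metric)))
        methods = [getattr(self, c) for c in inter if not c.startswith('_')]
        s = "\n".join(f() for f in methods if callable(f))
        if disp:
            print(s)
        else:
            return s


DemoExplainer(DemoMetric()).explain()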
Populates `object` with `attributes` from a mapping. | def set_attributes(object, attributes):
for name, attribute in attributes.items():
setattr(object, name, attribute) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_attributes_from_dict(self, dict):\n for key in dict:\n val = dict[key]\n if hasattr(self, key):\n setattr(self, key, val)",
"def update(self, f):\n\n for p in self.__mapper__.attrs:\n\n if p.key == 'oid':\n continue\n try:\n setattr(self, p.key, getattr(f, p.key))\n\n except AttributeError:\n # The dict() method copies data property values into the main dict,\n # and these don't have associated class properties.\n continue",
"def update(self, f):\n\n for p in self.__mapper__.attrs:\n\n if p.key == 'oid':\n continue\n try:\n setattr(self, p.key, getattr(f, p.key))\n\n except AttributeError:\n # The dict() method copies data property values into the main dict,\n # and these don't have associated class properties.\n continue",
"def set_attr_from_dict(self, dictionary):\n for key in dictionary:\n self.__setattr__(key, dictionary.get(key))",
"def set_attributes_from_dictionary(self, dict):\n for key in dict:\n self.__dict[key] = dict[key]\n self.__nonzero = True",
"def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr(type(self), 'field_names')\n for key, value in mapping.items():\n if key in field_names:\n setattr(self, key, value)",
"def from_dict(cls, dict_object):\n\n return cls(**dict_object)",
"def __init__(self, mapping: Mapping[str, Any]) -> None:\n self.__dict__.update(mapping)",
"def update_from_dict(self, dictionary):\n for key in dictionary:\n setattr(self, key, dictionary[key])\n return self.to_dict()",
"def from_dict(self, data, partial_update=True):\n fs = ['map_id', 'map_name', 'resource_path', 'longitude', 'latitude']\n for f in fs:\n try:\n setattr(self, f, data[f])\n except KeyError:\n if not partial_update:\n raise InvalidModelUsage(\n \"change map information from dict fail, field [%s] missed!\" % f,\n MAP_ERROR)",
"def _get_dict(self, obj):\n\n for attr, value in obj.__dict__.items():\n self.__dict__[attr] = value",
"def from_dict(self, dict_=None):\n for key in dict_:\n if hasattr(self, key):\n setattr(self, key, dict_[key])",
"def load_from_dict(self, dict_):\n for key, value in six.iteritems(dict_):\n setattr(self, util.as_attr(key), value)\n self._check_against_schema()",
"def merge(self, new_attributes):\n for k, v in new_attributes.items():\n setattr(self, k, v)",
"def from_dict(self, dict_=None):\n for key, value in dict_.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def __init__(self, adict):\n\n self.__dict__.update(adict)\n\n for k, v in adict.items():\n if isinstance(v, dict):\n self.__dict__[k] = ParamObject(v)",
"def from_dict(cls, _dict: Dict) -> 'Resource':\n args = {}\n if 'attributes' in _dict:\n args['attributes'] = [Attribute.from_dict(x) for x in _dict.get('attributes')]\n return cls(**args)",
"def update_attributes_map(self, extended_attributes,\n extension_attrs_map=None):\n if not extension_attrs_map:\n return\n\n for resource, attrs in extension_attrs_map.items():\n extended_attrs = extended_attributes.get(resource)\n if extended_attrs:\n attrs.update(extended_attrs)",
"def update(object, **attrs):\n updated = {}\n for key, value in attrs.items():\n if not hasattr(object, key) or getattr(object, key) != attrs[key]:\n updated[key] = value\n setattr(object, key, value)\n return updated",
"def updateFromDict(self, data):\n for key, value in data.items():\n setattr(self, key, value)",
"def _parseDictionary(self):\n for i in self.na_dict.keys():\n setattr(self, i, self.na_dict[i])",
"def set_attrs(self, **kwargs) -> None:\n self._obj.coords[GEO_MAP_COORD].attrs.update(**kwargs)",
"def set(self, model_object: dict) -> None:\n for k, v in model_object.items():\n if hasattr(self, k):\n setattr(self, str(k), v)",
"def update_attributes_with_dict(self, attribute_dict):\n\n for attribute_name in attribute_dict.keys():\n \n self.update_attribute(attribute_name, attribute_dict[attribute_name])",
"def fromdict(cls, mapping):\n ud = cls()\n for k in mapping:\n v = dict.__getitem__(mapping, k) # okay for py2/py3\n if isinstance(v, dict):\n v = cls.fromdict(v)\n dict.__setitem__(ud, k, v)\n return ud",
"def _set_attributes(self, model):\n\n if model:\n self._get_dict(model)",
"def add_attributes(self, x):\n for k, v in x.items():\n setattr(self, k, v)",
"def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)",
"def _set_attrs(ds, **attrs_map):\n for key in attrs_map:\n val = attrs_map[key] # Use Python 2/3 agnostic style\n ds.attrs[key] = val",
"def initobj(obj, attrs):\n for a in obj.InstAttr:\n if a != 'id' and a in attrs:\n setattr(obj, a, attrs[a])"
]
| [
"0.65004426",
"0.62904537",
"0.62904537",
"0.62468666",
"0.62062573",
"0.6180569",
"0.61680084",
"0.61148757",
"0.6112813",
"0.6051751",
"0.6012942",
"0.5996072",
"0.59575933",
"0.59540755",
"0.5932773",
"0.5913263",
"0.58975583",
"0.5868232",
"0.5862673",
"0.5854236",
"0.5826926",
"0.58109045",
"0.5810342",
"0.5808347",
"0.577426",
"0.57721555",
"0.5756854",
"0.5737463",
"0.5718664",
"0.56956404"
]
| 0.6646748 | 0 |
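A minimal usage sketch of the set_attributes helper in the row above, restated here (with obj instead of the builtin-shadowing object parameter name) so it runs on its own; the Config class and the host/port values are illustrative only.

def set_attributes(obj, attributes):
    for name, attribute in attributes.items():
        setattr(obj, name, attribute)


class Config:
    pass


cfg = Config()
set_attributes(cfg, {"host": "localhost", "port": 8080})
assert cfg.host == "localhost" and cfg.port == 8080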
Populates `object` with attributes from `kwargs` as defined by the default mapping `defaults`. If an item is contained in `kwargs` that is not defined in `defaults`, a `TypeError` is raised. | def set_attributes_from_kwargs(object, kwargs, defaults):
set_attributes(
object,
dict((key, kwargs.pop(key, value)) for key, value in defaults.items())
)
if kwargs:
raise TypeError(
"set_attributes_from_kwargs() got an unexpected keyword argument "
"%r" % kwargs.popitem()[0]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initDefaults(self, kwargs):\n \n for k,v in self.defaults.iteritems():\n if k in kwargs: # use assigned values\n setattr(self, k, kwargs[k])\n else: # use default values\n setattr(self, k, v)\n \n for k,v in kwargs.iteritems():\n if k not in self.defaults:\n setattr(self, k, v)\n pass",
"def defaults(self, **kwargs):\n for i in kwargs:\n self._.setdefault(i, kwargs[i])\n return self",
"def set_object(**kwargs):\n this_dict = {}\n for key, value in kwargs.items():\n this_dict[key] = value\n return this_dict",
"def with_defaults(self, obj):\n self.check_valid_keys(obj)\n obj = dict(obj)\n for (key, value) in self.defaults.items():\n if key not in obj:\n obj[key] = value\n return obj",
"def _post_processing(\n kwargs, skip_translate, invalid\n): # pylint: disable=unused-argument\n # If any defaults were not expicitly passed, add them\n for item in DEFAULTS:\n if item not in kwargs:\n kwargs[item] = DEFAULTS[item]",
"def __init__(self, **kwargs):\n \n default_attr = dict(username='')\n\n allowed_attr = list(default_attr)\n default_attr.update(kwargs)\n\n for key in default_attr:\n if key in allowed_attr:\n self.__dict__[key] = default_attr.get(key)",
"def update_model_kwargs_logic(default_kwargs: dict = None, user_kwargs: dict = None):\n out = {}\n if default_kwargs is None:\n default_kwargs = {}\n if user_kwargs is None:\n user_kwargs = {}\n\n # Check valid kwargs\n for iter_key in user_kwargs.keys():\n if iter_key not in default_kwargs:\n raise ValueError(\"Model kwarg {0} not in default arguments {1}\".format(iter_key, default_kwargs.keys()))\n\n out.update(default_kwargs)\n\n # Nested update of kwargs:\n def _nested_update(dict1, dict2):\n for key, values in dict2.items():\n if key not in dict1:\n print(\"WARNING:kgcnn: Unknown model kwarg {0} with value {1}\".format(key, values))\n dict1[key] = values\n else:\n if isinstance(dict1[key], dict) and isinstance(values, dict):\n # The value is a dict of model arguments itself. Update the same way.\n dict1[key] = _nested_update(dict1[key], values)\n elif isinstance(dict1[key], dict) and not isinstance(values, dict):\n # If values is None, means no information, keep dict1 values untouched.\n if values is not None:\n raise ValueError(\"Can not overwriting dictionary of {0} with {1}\".format(key, values))\n else:\n # Just any other value to update\n dict1[key] = values\n return dict1\n\n return _nested_update(out, user_kwargs)",
"def update_with_defaults(**kwargs):\n # Update the defaults with the input values\n with open(DEFAULTS, \"r\") as f:\n defaults = json.load(f)\n return _update(kwargs, defaults)",
"def __init__(self, *args, **kwargs):\n for dictionary in [_ for _ in args if isinstance(_, dict)]:\n for key in dictionary:\n setattr(self, key, dictionary[key])\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def with_init(attrs, defaults=None):\n if defaults is None:\n defaults = {}\n\n def init(self, *args, **kw):\n for a in attrs:\n try:\n v = kw.pop(a)\n except KeyError:\n try:\n v = defaults[a]\n except KeyError:\n raise ValueError(\"Missing value for '{0}'.\".format(a))\n setattr(self, a, v)\n self.__original_init__(*args, **kw)\n\n def wrap(cl):\n cl.__original_init__ = cl.__init__\n cl.__init__ = init\n return cl\n\n return wrap",
"def fill(self, **kwargs):\r\n for name in kwargs.keys():\r\n setattr(self, name, kwargs[name])\r\n return self",
"def update_from_kwargs(self, **kwargs):\n for (key, value) in kwargs.items():\n setattr(self, key, value)",
"def make_object(obj, kwargs):\n return obj(**kwargs)",
"def __init__(self, *args, **kwargs):\n \n for arg in args:\n if isinstance(arg, dict):\n for key, value in arg.items():\n self[key] = value\n if hasattr(arg, \"__dict__\"):\n for key, value in arg.__dict__.items():\n self[key] = value\n\n if kwargs:\n for key, value in kwargs.items():\n self[key] = value",
"def set_defaults(sender, **kwargs):\n instance = kwargs.get('instance')\n for thing in dir(instance):\n empty, duck, field_name = thing.partition('get_default_')\n if not empty and duck and hasattr(instance, field_name):\n value = getattr(instance, field_name)\n if value in (None, ''):\n default_value = getattr(instance, thing)\n if callable(default_value):\n default_value = default_value()\n setattr(instance, field_name, default_value)",
"def __init__(self, **kwargs):\n\n for (k, v) in self._fields:\n if k in kwargs:\n self.__dict__[k] = v.validate(kwargs[k])\n self.__dict__[k] = v.default",
"def get_meta_info_from_object(obj, **defaults):\n meta = getattr(obj, 'Meta', object())\n dct = dict(\n (atr_name, getattr(meta, atr_name, None))\n for atr_name in dir(meta)\n if not atr_name.startswith('_')\n )\n\n if defaults:\n for k, v in defaults.items():\n set_default_to_meta(meta, k, v)\n\n return dct",
"def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )",
"def _prepare_kwargs(self, factory, factory_args, factory_kw):\n defaults = get_argdefaults(factory, len(factory_args))\n\n for arg, default in defaults.iteritems():\n if arg in factory_kw:\n continue\n elif arg in self.factories:\n defaults[arg] = self.get(arg)\n elif default is NO_DEFAULT:\n raise KeyError(\"No factory for arg: %s\" % arg)\n\n defaults.update(factory_kw)\n return defaults",
"def __init__(self, **kwargs):\n self.__dict__.update(kwargs)",
"def __init__(self, **kwargs):\n self.__dict__.update(kwargs)",
"def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults",
"def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults",
"def __init__(self, _dict=None, **kwargs):\n \n if _dict is not None:\n self.__dict__.update(_dict)\n self.__dict__.update(kwargs)",
"def new_from_jsondict(cls, data, param_defaults, **kwargs):\n\n json_data = data.copy()\n if kwargs:\n for key, val in kwargs.items():\n json_data[key] = val\n\n c = cls(param_defaults, **json_data)\n c._json = data\n return c",
"def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)",
"def _set_default_attrs(self, obj, subs):\n for attr_name, attr_subs in subs.iteritems():\n if not getattr(obj, attr_name, None):\n for newattr_name, newattr_val in attr_subs.iteritems():\n setattr(obj, newattr_name, newattr_val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)"
]
| [
"0.7175331",
"0.6747151",
"0.6592914",
"0.6470292",
"0.64214796",
"0.6388719",
"0.6304692",
"0.6216469",
"0.610586",
"0.6088044",
"0.6087905",
"0.60436976",
"0.601557",
"0.6001094",
"0.59770507",
"0.59549373",
"0.59283787",
"0.5908451",
"0.59080917",
"0.5891961",
"0.5891961",
"0.58829397",
"0.58829397",
"0.5845942",
"0.5819103",
"0.5799098",
"0.578488",
"0.5725017",
"0.5725017",
"0.5725017"
]
| 0.827948 | 0 |
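A self-contained sketch of the kwargs-with-defaults pattern in the row above: known keys are popped out of kwargs (falling back to the defaults), and anything left over triggers the same TypeError the original raises. Widget is a hypothetical example class, not part of the source.

def set_attributes_from_kwargs(obj, kwargs, defaults):
    for key, value in defaults.items():
        setattr(obj, key, kwargs.pop(key, value))
    if kwargs:
        raise TypeError(
            "set_attributes_from_kwargs() got an unexpected keyword argument "
            "%r" % kwargs.popitem()[0]
        )


class Widget:
    def __init__(self, **kwargs):
        set_attributes_from_kwargs(self, kwargs, {"width": 100, "height": 50})


w = Widget(width=200)              # height falls back to the default of 50
assert (w.width, w.height) == (200, 50)
try:
    Widget(colour="red")           # unknown keyword -> TypeError, as in the original
except TypeError as exc:
    print(exc)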
Returns ``list(object)`` or a list containing object. | def force_list(object):
try:
return list(object)
except TypeError:
return [object] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_list(obj):\n return obj if isinstance(obj, list) else [obj]",
"def carries_object(obj=None): #py:carries_object\n if obj is not None:\n ans = RUR._carries_object_(obj)\n else:\n ans = RUR._carries_object_()\n return list(ans)",
"def _to_list(obj):\n if not isinstance(obj, list):\n return [obj]\n else:\n return obj",
"def listify(obj):\n if obj is None:\n # When the object is None, an empty list will be returned\n return []\n elif isinstance(obj, list):\n # When the object is already a list, that list will be returned\n return obj\n\n # When a single object is passed to the method, a list with the\n # object as single item will be returned\n return [obj]",
"def obj_list(self):\n return self._obj_list",
"def listify(obj):\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]",
"def object_here(obj=None): #py:object_here\n if obj is not None:\n ans = RUR._object_here_(obj)\n else:\n ans = RUR._object_here_()\n return list(ans) # convert from JS list-like object to proper Python list",
"def coerce_to_list(obj) -> list:\n if obj is None:\n return []\n elif isinstance(obj, list):\n return obj\n else:\n return [obj]",
"def _sdk_object_to_list(object):\n result_list = []\n for item in object:\n result_list.append(_get_sdk_object_dict(item))\n return result_list",
"def listify(obj):\n if obj is None:\n return []\n else:\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]",
"def listify(obj):\n if obj is None:\n return []\n else:\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]",
"def listify(obj):\n if obj is None:\n return []\n else:\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]",
"def carries_object(self, obj=''): #py:UR.carries_object\n if obj is not None:\n return list(RUR._UR.carries_object_(self.body, obj))\n else:\n return list(RUR._UR.carries_object_(self.body))",
"def as_tuple_or_list(obj):\n return obj if isinstance(obj, (list, tuple)) else [obj]",
"def to_list(obj, list_cls=list):\n if obj is None:\n return list_cls()\n if isinstance(obj, list_cls):\n return obj\n if isinstance(obj, (unicode, str)):\n return list_cls((obj,))\n if isinstance(obj, (list, tuple, set, frozenset)) or hasattr(obj, '__iter__'):\n return list_cls(obj)\n return list_cls((obj,))",
"def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return",
"def objects_rst(self):\n return [_.as_rst for _ in self.objects]",
"def objects (self):\n return InternalObjectList (self)",
"def object_here(self, obj=None): #py:UR.object_here\n if obj is not None:\n return list(RUR._UR.object_here_(self.body, obj))\n else:\n return list(RUR._UR.object_here_(self.body))",
"def list_objects(self, path):\n return [x for x in self.list_objects_generator(path)]",
"def serialize_list(self, obj):\n return self.serialize_tuple(obj)",
"def list(self) -> list:\n return list(self)",
"def list(self):\n return self._get_list()",
"def to_list(obj: Any, size: int = 1):\n if not is_list_like(obj):\n return [obj] * int(size)\n else:\n return obj",
"def list() -> List:\n pass",
"def get_objects(self):\n return self._objects",
"def all(self):\n return (self.__objects)",
"def GetObjects(self): \r\n return self.model.GetObjects()",
"def make_list(obj: Any, make_list_list: bool = False) -> List:\r\n\r\n # Check whether any object is a list, this is important for eg:\r\n # [None, []]\r\n any_list = False\r\n only_none = True\r\n if type(obj) is list:\r\n for o in obj:\r\n if o:\r\n only_none = False\r\n if type(o) is list:\r\n any_list = True\r\n\r\n if make_list_list and type(obj) is list and not any_list and not only_none:\r\n return [obj]\r\n elif type(obj) is list:\r\n return obj\r\n else:\r\n return [obj]",
"def ensure_list(obj, allow_tuple=True):\n if isinstance(obj, list):\n return obj\n\n elif allow_tuple and isinstance(obj, tuple):\n return obj\n elif not allow_tuple and isinstance(obj, tuple):\n return list(obj)\n else:\n return [obj]"
]
| [
"0.7728477",
"0.7434849",
"0.7419359",
"0.7394744",
"0.7351581",
"0.7202986",
"0.71388775",
"0.7117519",
"0.70312494",
"0.70102775",
"0.70102775",
"0.70102775",
"0.6860107",
"0.6844003",
"0.6799226",
"0.6786213",
"0.66601646",
"0.6570251",
"0.6544676",
"0.6526964",
"0.6492086",
"0.64634824",
"0.6457995",
"0.6418779",
"0.64157474",
"0.6406851",
"0.63952285",
"0.6391212",
"0.63729453",
"0.63492906"
]
| 0.79207885 | 0 |
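The behaviour of force_list in the row above is easiest to see with a few values; note that strings are themselves iterable, so they are split into characters rather than wrapped — a property of list() the helper simply inherits rather than a choice documented in the source.

def force_list(obj):
    try:
        return list(obj)
    except TypeError:
        return [obj]


assert force_list((1, 2, 3)) == [1, 2, 3]   # any iterable becomes a plain list
assert force_list(42) == [42]               # non-iterables are wrapped
assert force_list("ab") == ["a", "b"]       # strings iterate per character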
Splits the given length `n` into a larger and a smaller part using the golden ratio to determine a "perfect" split. | def golden_split(n):
large = n / GOLDEN_RATIO
small = n - large
large = int(round(large))
small = int(round(small))
return large, small | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret",
"def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]",
"def splitInBlocks (l, n):\n k = len(l) / n\n r = len(l) % n\n\n i = 0\n blocks = []\n while i < len(l):\n if len(blocks)<r:\n blocks.append(l[i:i+k+1])\n i += k+1\n else:\n blocks.append(l[i:i+k])\n i += k\n\n return blocks",
"def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits",
"def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))",
"def split(a, N):\n\n integ = int(len(a) / N)\n remain = int(len(a) % N)\n\n splitted = [a[i * integ + min(i, remain):(i + 1) * integ +\n min(i + 1, remain)] for i in range(N)]\n\n return splitted",
"def split_into_chunks(x, n):\n csize = int(np.ceil(len(x) / n))\n out = list()\n \n i = 0\n while i * csize < len(x):\n out.append(x[(i * csize):(i * csize + csize)])\n i += 1\n\n return out",
"def divide_chunks(a_list, n):\n return [a_list[i:i + n] for i in range(0, len(a_list), n)]",
"def split_into_n(s, n):\n return [s[k:k + n] for k in range(0, len(s), n)]",
"def random_splits(s, n, nsplits=2):\n splits = sorted([random.randint(0, n) for _ in range(nsplits - 1)])\n splits = [0] + splits + [n]\n for begin, end in zip(splits, splits[1:]):\n yield s[begin:end]",
"def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments",
"def get_splits(ntot, nper):\n beglist = numpy.arange(0,ntot,nper)\n endlist = numpy.arange(0,ntot,nper) + nper - 1\n\n if (ntot % nper) != 0:\n endlist[-1] = ntot-1\n return beglist, endlist",
"def _split(self, c, n):\n\tsubsets = []\n\tstart = 0\n\tfor i in range(n):\n\t subset = c[start:start + (len(c) - start) / (n - i)]\n\t subsets.append(subset)\n\t start = start + len(subset)\n\treturn subsets",
"def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def split_list(l, n):\n n *= 2\n returned_list = [l[i: i + n] for i in range(0, len(l), n)]\n return returned_list",
"def _compute_split_boundaries(split_probs, n_items):\n if len(split_probs) > n_items:\n raise ValueError(\n 'Not enough items for the splits. There are {splits} '\n 'splits while there are only {items} items'.format(\n splits=len(split_probs), items=n_items\n )\n )\n total_probs = sum(p for name, p in split_probs)\n if abs(1 - total_probs) > 1e-8:\n raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))\n split_boundaries = []\n sum_p = 0.0\n for name, p in split_probs:\n prev = sum_p\n sum_p += p\n split_boundaries.append((name, int(prev * n_items), int(sum_p * n_items)))\n\n # Guard against rounding errors.\n split_boundaries[-1] = (\n split_boundaries[-1][0],\n split_boundaries[-1][1],\n n_items,\n )\n\n return split_boundaries",
"def split_list(original_list, n):\n if len(original_list) <= n:\n final_list = [original_list, ]\n else:\n final_list = []\n bit_size = len(original_list) / n\n for i in range(n):\n final_list.append(original_list[i*bit_size:(i+1)*bit_size])\n\n return final_list",
"def _get_split_sizes(self, n_examples):\n\n min_ex = (int(n_examples // self.n_splits)\n * np.ones(self.n_splits, dtype=np.int8))\n \n rem = np.array(\n [1 if i < n_examples % self.n_splits else 0\n for i in range(self.n_splits)],\n dtype=np.int8)\n\n return np.add(min_ex, rem)",
"def even_split(a, n):\n n = min(n, len(a)) # if less elements in array than chunks to output, change chunks to array length\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))",
"def chunks(l, n):\n for i in range(0, len(l) // n * n + n - 1, n):\n if len(l[i:i + n]):\n yield l[i:i + n]",
"def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))",
"def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))",
"def list_split(self, l1, n=1):\n if (len(l1) % n) == 0:\n m = len(l1) // n\n else:\n m = len(l1) // n + 1\n l2 = [l1[i * n:(i + 1) * n] for i in range(m)]\n return l2",
"def evenquerychunks(l, n):\n\n l = list(l)\n \n import math\n n = int(math.floor(len(l)/float(n))) + 1\n print len(l)\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n results = []\n for i in xrange(0, len(l), n):\n results.append( l[i:i+n])\n \n return results",
"def lap_split_n(img, n):\n levels = []\n\n print(\"inside lap_split_n function \")\n\n for i in range(n):\n img, hi = lap_split(img)\n levels.append(hi)\n levels.append(img)\n return levels[::-1]",
"def make_chunks(l, n):\n return [l[i:i+n] for i in range(0, len(l), n)]",
"def chunks(seq, n):\n assert len(seq) > n\n avg = len(seq) / float(n)\n out = []\n last = 0\n while round(last) < len(seq):\n out.append(seq[round(last):round(last + avg)])\n last += avg\n return out",
"def evenchunks(l, n):\n if type(l) <> list:\n l = list(l)\n \n import math\n n = int(math.floor(len(l)/float(n))) + 10\n print len(l)\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in xrange(0, len(l), n):\n yield l[i:i+n]",
"def split(n):\n rest_of_num, last_num = n // 10, n % 10\n return rest_of_num, last_num",
"def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)"
]
| [
"0.68365747",
"0.6783161",
"0.6657712",
"0.63748825",
"0.6367787",
"0.6341123",
"0.6298543",
"0.6281955",
"0.6213982",
"0.6207446",
"0.6180709",
"0.6173117",
"0.6164623",
"0.6087013",
"0.6043045",
"0.6024858",
"0.59773743",
"0.5975298",
"0.59608966",
"0.5950952",
"0.59455067",
"0.59455067",
"0.5939249",
"0.59130764",
"0.5912948",
"0.5909719",
"0.5861007",
"0.5841967",
"0.5822916",
"0.5800953"
]
| 0.8150969 | 0 |
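A worked example of golden_split from the row above. The source defines GOLDEN_RATIO elsewhere; it is assumed here to be the usual constant (1 + sqrt(5)) / 2, roughly 1.618.

GOLDEN_RATIO = (1 + 5 ** 0.5) / 2   # assumed value; defined elsewhere in the source


def golden_split(n):
    large = n / GOLDEN_RATIO
    small = n - large
    return int(round(large)), int(round(small))


print(golden_split(100))   # (62, 38): 100 / 1.618... ~ 61.8 -> 62, and the remaining 38.2 -> 38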
Raises a CommandDictSanityError exception if the command dictionary is not considered "sane". | def assert_sanity(self):
# Maybe in the future: Check whether commands can be found in path
# For now, let the OS handle this
# Check whether command dictionary has a correct structure. Namely,
# that:
#
# 1. Toplevel children may only be called "commands" or "paths".
if len(self.command_dict) > 2:
raise CommandDictSanityError("Only two toplevel children allowed.")
for key in self.command_dict.keys():
if key not in ("commands","paths"):
raise CommandDictSanityError(
f"Invalid toplevel child found: {key}.")
# 2. "paths" node must be a list, and must only contain string
# children.
if "paths" in self.command_dict:
if type(self.command_dict["paths"]) != list:
raise CommandDictSanityError(
"The \"paths\" node must be a list.")
for path in self.command_dict["paths"]:
if type(path) != str:
raise CommandDictSanityError("Defined paths must be strings.")
# 3. "commands" node chilren (henceforth command nodes) must be
# dictionaries,
# 4. and may contain only the following keys:
# "regex", "cmd", "help", "markdown_convert", "formatted",
# "code" and "split".
# 5. The command node children may only be strings.
# 6. Command node children with keys "markdown_convert",
# "formatted" or "code" may only be defined as "true" or as
# "false".
if "commands" in self.command_dict.keys():
for com in self.command_dict["commands"]:
# Implement rule 3
if type(self.command_dict["commands"][com]) != dict:
raise CommandDictSanityError(
"Defined commands must be dictionaries.")
for opt in self.command_dict["commands"][com].keys():
# Implement rule 4
if opt not in ("regex",
"cmd",
"help",
"markdown_convert",
"formatted",
"code",
"split"):
raise CommandDictSanityError(
f"In command \"{com}\", invalid option found: " \
f"\"{opt}\".")
# Implement rule 6
elif opt in ("markdown_convert", "formatted", "code"):
if type(self.command_dict["commands"][com][opt]) != bool:
raise CommandDictSanityError(
f"In command \"{com}\", invalid value for option "
f"\"{opt}\" found: " \
f"\"{self.command_dict['commands'][com][opt]}\"")
# Implement rule 5
else:
if type(self.command_dict["commands"][com][opt]) != str:
raise CommandDictSanityError(
f"In command \"{com}\", command option " \
f"\"{opt}\" must be a string.")
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_command_dict(self, command):\n\n if not isinstance(command, dict):\n self.fail(\"Command must be a dictionary\")\n\n # NOTE(pboldin): Here we check for the values not for presence of the\n # keys due to template-driven configuration generation that can leave\n # keys defined but values empty.\n if command.get(\"interpreter\"):\n script_file = command.get(\"script_file\")\n if script_file:\n if \"script_inline\" in command:\n self.fail(\n \"Exactly one of script_inline or script_file with \"\n \"interpreter is expected: %r\" % command)\n # User tries to upload a shell? Make sure it is same as interpreter\n interpreter = command.get(\"interpreter\")\n interpreter = (interpreter[-1]\n if isinstance(interpreter, (tuple, list))\n else interpreter)\n if (command.get(\"local_path\")\n and command.get(\"remote_path\") != interpreter):\n self.fail(\n \"When uploading an interpreter its path should be as well\"\n \" specified as the `remote_path' string: %r\" % command)\n elif not command.get(\"remote_path\"):\n # No interpreter and no remote command to execute is given\n self.fail(\n \"Supplied dict specifies no command to execute, either \"\n \"interpreter or remote_path is required: %r\" % command)\n\n unexpected_keys = set(command) - {\"script_file\", \"script_inline\",\n \"interpreter\", \"remote_path\",\n \"local_path\", \"command_args\"}\n if unexpected_keys:\n self.fail(\n \"Unexpected command parameters: %s\" % \", \".join(\n unexpected_keys))",
"def test_badly_formatted_entry(self):\n with pytest.raises(AssertionError) as exc_info:\n list(parser.generate_commands(yaml.load(\"\"\"\n - key1: 1\n key2: 2\n \"\"\")))\n assert \"Command has multiple top-level keys: ['key1', 'key2']\" in str(exc_info.value)",
"def test_bad_command2(self):\n with self.assertRaises(KeyError):\n command = Command['Fake Command2']",
"def _validate(self):\n assert type(self.cmd) is dict\n assert self.cmd.keys() == {\"operation\",\"data\"}\n assert self.cmd[\"operation\"] == self._class_operation()",
"def test_bad_command1(self):\n with self.assertRaises(ValueError):\n command = Command('Fake Command1')",
"def validate_command(command):\n return command in list(VALID_COMMANDS.keys())",
"def testIncompleteDict(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('{ \"a\": 6, \"b\": ')\n # Is this next line correct? Should it be None?\n self.assertIs(None, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS2, repl.prompt)",
"def is_valid_command(command):\n return is_get(command) or is_insert(command) or is_update(command) or is_delete(command) or is_showall(command) or is_search(command)",
"def sanity_checker(self, command):\n\n index = self.indexer.get_index()\n kw = random.choice(list(index.keys()))\n return {\"index_type\": str(type(index)),\n \"indexer_type\": str(type(self.indexer)),\n \"post_mem\": str(index[kw]),\n \"post_type\": str(type(index[kw])),\n \"node_mem\": str(index[kw].start_node),\n \"node_type\": str(type(index[kw].start_node)),\n \"node_value\": str(index[kw].start_node.value),\n \"command_result\": eval(command) if \".\" in command else \"\"}",
"def _validate_input_dict(self, input):\n if isinstance(input, dict):\n required = {\"type\", \"value\"}\n not_found = required - set(input.keys())\n if not_found:\n raise SpecificationError(\n \"Required key(s) not found in input dictionary: {}\".format(\n \", \".join(not_found)\n )\n )\n else:\n raise Exception(\"input element has to be a dictionary\")",
"def _validated_conf_command(self, command):\n\n if (1, command) in self.CONF_ALLOWED_COMMANDS or (0, command) in self.CONF_ALLOWED_COMMANDS:\n self._last_command = command\n self._last_command_mode = 1\n self._last_command_failure = None\n return True\n\n self._last_command_failure = 'Unrecognised Command'",
"def validate_basic_structure(yaml_dict):\n assert len(yaml_dict) == 1\n assert \"meta\" in yaml_dict[0]\n assert \"class\" in yaml_dict[0]\n assert \"properties\" in yaml_dict[0][\"meta\"]\n assert \"attributes\" in yaml_dict[0][\"meta\"]\n assert \"commands\" in yaml_dict[0][\"meta\"]",
"def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))",
"def test_process_args_should_reject_missing_units(self, arg_dict):\n with pytest.raises(KeyError):\n change_resolution.process_args(arg_dict)",
"def _msg_is_command(self, msg):\n return isinstance(msg, dict)",
"def __assert_option(self, key):\n\n if not self.has_option(key):\n raise KeyError(\"No such option.\")",
"def _validate_command(self):\n if not isinstance(self.command, list):\n raise securesystemslib.exceptions.FormatError(\n \"Invalid Link: field `command` must be of type list, got: {}\"\n .format(type(self.command)))",
"def test_if_it_accepts_dictionary(self):\n with self.assertRaises(TypeError):\n prime_numbers({})",
"def test_raise_exception(self):\n with self.assertRaises(Exception):\n SshpassBaseCommandBuilder(COMMAND).to_build()",
"def _verify_arguments(self):\n # if self.options.action == \"create\":\n # if self.options.encrypt_payload and not self.options.payload_secret:\n # self.parser.error('A secret must be supplied with --payload-secret option when the --encrypt-payload option is in use.')\n pass",
"def _validate_environment(self):\n if not isinstance(self.environment, dict):\n raise securesystemslib.exceptions.FormatError(\n \"Invalid Link: field `environment` must be of type dict, got: {}\"\n .format(type(self.environment)))",
"def check_dictionary(self, dico):\n if dico is not None:\n self.log.info('Check the dictionary')\n test, aff = validate(dico, proto_domain, test_comp = False)\n if test:\n self.log.info(aff)\n else:\n self.log.error(aff)\n sys.exit()",
"def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())",
"def is_valid_command(command):\n # TODO(etscrivner): Eventually we'd like to construct this dynamically from\n # a list of all available commands\n valid_commands = [\n 'add', 'append', 'decr', 'delete', 'flush_all', 'get', 'gets', 'incr',\n 'prepend', 'quit', 'replace', 'set', 'stats', 'verbosity', 'version',\n ]\n\n if not command:\n return False\n\n parts = command.split('\\r\\n')\n command_parts = parts[0].split(' ')\n\n command = command_parts[0]\n return command.strip().lower() in valid_commands",
"def test_sendCommandValidateTagsWithSpaces(self):\n sendTags = {\"aaa bbb\": \"ccc\"}\n error = self.assertRaises(\n ValueError,\n self.p.sendCommand,\n \"CMD\",\n (\"param1\", \"param2\"),\n \"irc.example.com\",\n sendTags,\n )\n self.assertEqual(error.args[0], \"Tag contains invalid characters.\")",
"def test_dumpling_with_missing_chef(self, packet_dumpling_dict):\n del packet_dumpling_dict['metadata']['chef']\n\n with pytest.raises(InvalidDumpling):\n validate_dumpling(json.dumps(packet_dumpling_dict))",
"def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def _cli_validate(self, settings, remaining_argv):\n return None",
"def check_commands(self):\n pass"
]
| [
"0.6877361",
"0.6581146",
"0.6167865",
"0.6011121",
"0.5824068",
"0.5806398",
"0.5696506",
"0.5690803",
"0.5660363",
"0.56550825",
"0.56041145",
"0.5590976",
"0.5538971",
"0.55288416",
"0.5444495",
"0.5408569",
"0.5354845",
"0.5340966",
"0.53407615",
"0.53329164",
"0.531246",
"0.5300639",
"0.5298981",
"0.5254272",
"0.52427894",
"0.5240568",
"0.5236609",
"0.5216604",
"0.5216511",
"0.5198831"
]
| 0.6656696 | 1 |
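To make the six sanity rules enforced by assert_sanity above concrete, here is a command dictionary (with hypothetical commands, chosen only for illustration) that would pass: only "commands" and "paths" at the top level, "paths" as a list of strings, each command a dict restricted to the allowed keys, booleans for the flag options and strings for everything else.

command_dict = {
    "paths": ["/usr/local/bin", "/opt/tools/bin"],   # rule 2: a list of strings
    "commands": {                                     # rules 3-6 apply per command
        "uptime": {
            "regex": r"^uptime\b",     # string option
            "cmd": "uptime",           # string option
            "help": "Show how long the host has been running.",
            "formatted": True,         # boolean-only option
            "code": False,             # boolean-only option
        },
    },
}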
Returns whether the given string matches any of the commands' names regex patterns. | def match(self, string):
matched = False
cmd = None
if string in self.commands.keys():
matched = True
cmd = string
else:
for command in self.commands.keys():
if "regex" in self.commands[command].keys() \
and re.match(self.commands[command]["regex"], string):
matched = True
cmd = command
break
if cmd and len(cmd) > 0:
self._last_matched_command = cmd
else:
self._last_matched_command = None
return matched | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_name_matching(string, matches: bool):\n assert (re.fullmatch(pattern, string) is not None) == matches",
"def _is_regex_match(s, pat):\n\n pat = pat.rstrip()\n m = re.search(Settings._REPAT, pat)\n if m:\n flags_combined = 0\n if m.group('flag'):\n char_to_flag = {\n 'A':re.A, 'I':re.I, 'L':re.L, 'M':re.M, 'S':re.S, 'X':re.X}\n for flag in list(m.group('flag')):\n flags_combined |= char_to_flag[flag]\n return bool(re.search(m.group('pat'), s, flags_combined))\n raise InvalidRegexError(pat)",
"def check_for_strings(text, strings):\n for string in strings:\n if text.find(string) >= 0:\n return True\n return False",
"def match(self, flags):\n\t\tflags = flags.split(',')\n\t\tif len(self._flags) != len(flags):\n\t\t\treturn False\n\t\tfor i, f in enumerate(flags):\n\t\t\tif f == '*':\n\t\t\t\tcontinue\n\t\t\tfor f in r.split('|'):\n\t\t\t\tif not f.isalnum():\n\t\t\t\t\traise ValueError(f'invalid flag \"{f}\" : flags must be alphanumeric')\n\t\t\t\tif f == self._flags[i]:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\treturn False\n\t\treturn True",
"def _regex_comp(self, name, flist):\n if name in flist:\n return True\n for item in flist:\n p = re.compile(item)\n match = p.match(name)\n if (match is not None):\n return True\n return False",
"def matches(self, pid):\n if self._command_wildcards or self._command_regexs:\n # Matchers requiring comm file\n path = P.join(PROC_DIR, str(pid), 'comm')\n try:\n with open(path) as f:\n comm = f.read().rstrip()\n for pattern in self._command_wildcards:\n if fnmatch(comm, pattern):\n return True\n\n for re_obj in self._command_regexs:\n if re_obj.match(comm):\n return True\n except FileNotFoundError:\n # process may have exited before file could be read\n return False\n\n return False",
"def has_pattern(self, name):\n return name in self.__patterns",
"def command_match(text, command):\n text = text.split()\n command = command.split()\n if len(text) != len(command):\n return False\n for i, val in enumerate(text):\n if val != command[i][:len(val)]:\n return False\n return True",
"def isValid(text):\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))",
"def matches(self, text):\n return text == self.command",
"def stringcheck(self, rule, string):\n if not \"*\" in rule:\n return rule in string\n elif rule[0] == \"*\":\n return string.endswith(rule[1:])\n elif rule[-1] == \"*\":\n return string.startswith(rule[:-1])\n else:\n start, end = rule.split(\"*\")\n return string.startswith(start) and string.endswith(end)",
"def matchPatterns(path, patterns):\n name = os.path.basename(path)\n for p in patterns:\n if fnmatch.fnmatch(name, p):\n return True\n return False",
"def isValid(text):\n return bool(re.search(r\"\\b((close|activate)\\ (check|tunnel|ubuntu|fedora|windows))\\b\", text, re.IGNORECASE))",
"def is_matching(patterns, blob):\n for pattern in patterns:\n if re.match(fnmatch.translate(pattern), blob.path):\n return True\n return False",
"def _memorized_fnmatch(name: str, pattern: str) -> bool:\n return bool(_compile_fnmatch(pattern).match(name))",
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match",
"def match_patterns(pathname, patterns):\n for pattern in patterns:\n if fnmatch(pathname, pattern):\n return True\n return False",
"def is_ignored(string: str) -> bool:\n return any([fnmatch.fnmatch(string, pattern) for pattern in IGNORE_LIST])",
"def regex_match(text, pattern):\n try:\n pattern = re.compile(\n pattern,\n flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,\n )\n except BaseException:\n return False\n return pattern.search(text) is not None",
"def found(self, command, regex):\n result = self.sys(command)\n for line in result:\n found = re.search(regex,line)\n if found:\n return True\n return False",
"def found(self, command, regex):\n result = self.sys(command)\n for line in result:\n found = re.search(regex,line)\n if found:\n return True\n return False",
"def match(pattern, target):\n pattern = ''.join('.*' if c == '*' else re.escape(c) for c in pattern)\n return bool(re.match('^' + pattern + '$', target))",
"def check_word(word):\n\n return bool(re.match(r'^[a-z]+$', word))",
"def _check_logic_syntax(string):\n return logExp.matches(string)",
"def __contains__(self, label: str) -> bool:\n return label in self.fuzzy_patterns or label in self.regex_patterns",
"def match(self, string):\n ary = string.split(' ', len(self.matchers))\n if all(m(a) for m, a in zip(self.matchers, ary)):\n return ary",
"def recipe_name(self, recipe_name: str) -> bool:\n if not self._recipe_patterns:\n return True\n\n if self._compiled_recipe_pattern is None:\n self._compiled_recipe_pattern = re.compile('|'.join(self._recipe_patterns))\n\n return self._compiled_recipe_pattern.match(recipe_name)",
"def funcpattern(funcstr):\n m = match('.*\\(.*\\)',funcstr)\n if m and m.start() == 0 and m.end() == len(funcstr):\n return True\n return False",
"def match(pattern, string):\n if not len(pattern) and not len(string):\n return True\n\n if len(pattern) > 1 and pattern[0] == '*' and len(string) == 0:\n return False\n\n if (len(pattern) > 0 and pattern[0] == '?') or \\\n (len(pattern) != 0 and len(string) != 0 and pattern[0] == string[0]):\n return match(pattern[1:], string[1:])\n\n if len(pattern) != 0 and pattern[0] == '*':\n return match(pattern[1:], string) or match(pattern, string[1:])\n\n return False",
"def contains(text: str, pattern: str) -> bool:\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # COMPLEXITY: O(n) b/c we are using find_index method which is O(n)\n return find_index(text, pattern) is not None"
]
| [
"0.6867948",
"0.6496885",
"0.6373055",
"0.6343486",
"0.62946117",
"0.627003",
"0.62478054",
"0.6186492",
"0.6147709",
"0.6064243",
"0.6061243",
"0.6040784",
"0.60394794",
"0.6034066",
"0.600563",
"0.5999523",
"0.5939395",
"0.5923962",
"0.59091336",
"0.589685",
"0.589685",
"0.58809954",
"0.587251",
"0.58674395",
"0.5847755",
"0.58454853",
"0.5799971",
"0.5792867",
"0.57912946",
"0.576833"
]
| 0.731758 | 0 |
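A stand-alone sketch of the lookup order in match() above: an exact command name is accepted first, otherwise each command's optional "regex" pattern is tried with re.match. The command table below is hypothetical, and the bookkeeping of the last matched command is reduced to a plain return value.

import re

commands = {
    "uptime": {"regex": r"^uptime\b", "cmd": "uptime"},
    "echo": {"cmd": "echo"},
}


def match(string):
    if string in commands:
        return string                      # exact name wins
    for name, spec in commands.items():
        if "regex" in spec and re.match(spec["regex"], string):
            return name                    # first regex hit is kept
    return None


assert match("echo") == "echo"
assert match("uptime --pretty") == "uptime"
assert match("reboot") is None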
Return the help string of the given command. | def get_help(self,command):
if "help" in self.commands[command]:
return self.commands[command]["help"]
else:
return "No help defined for this command." | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def help_for_command(command):\n help_text = pydoc.text.document(command)\n # remove backspaces\n return re.subn('.\\\\x08', '', help_text)[0]",
"def help(command=None):\n if command is None: \n # print first line of docstring\n for cmd in commands:\n ds = commands[cmd].__doc__.split('\\n')[0]\n print \"%-16s %s\" % (cmd,ds)\n else:\n print commands[command].__doc__",
"def help(self, cmd=\"\", *, fail=\"\"):\n class_dict = dict(type(self).__dict__)\n # Add this function to class, so that when subclassing,\n # help for help is found\n class_dict.update({\"help\": self.help})\n if cmd.startswith(self.predicate):\n # Strip predicate\n cmd = cmd[len(self.predicate) :]\n # Check that command exists and is not\n # private, protected or special method\n if (not cmd.startswith(\"_\")) and cmd in class_dict.keys():\n item = class_dict[cmd]\n if callable(item):\n if item.__doc__:\n return \"Help on command '{}':\\n. {}\".format(\n cmd, \"\\n. \".join(cleandoc(item.__doc__).split(\"\\n\"))\n )\n return \"No help on command '{}'\".format(cmd)\n # If no cmd given or wrong cmd given, return commands\n commands = []\n for key, value in class_dict.items():\n if not key.startswith(\"_\"):\n if callable(value):\n commands.append(key)\n msg = (\n \"Commands:\\n {}\".format(\", \".join(commands))\n + \"\\n for more help on command, use \"\n + \"{}help command\".format(self.predicate)\n )\n if fail:\n msg = fail + \"\\n\" + msg\n return msg",
"def get_command_help(self, module_name, command_name):\r\n command = self.env.get_command(module_name, command_name)\r\n\r\n default_format = 'raw'\r\n if sys.stdout.isatty():\r\n default_format = 'table'\r\n\r\n arg_doc = command.__doc__\r\n\r\n if 'confirm' in command.options:\r\n arg_doc += \"\"\"\r\nPrompt Options:\r\n -y, --really Confirm all prompt actions\r\n\"\"\"\r\n\r\n if '[options]' in arg_doc:\r\n arg_doc += \"\"\"\r\nStandard Options:\r\n --format=ARG Output format. [Options: table, raw] [Default: %s]\r\n -C FILE --config=FILE Config file location. [Default: ~/.softlayer]\r\n --debug=LEVEL Specifies the debug noise level\r\n 1=warn, 2=info, 3=debug\r\n --timings Time each API call and display after results\r\n --proxy=PROTO:PROXY_URL HTTP[s] proxy to be use to make API calls\r\n -h --help Show this screen\r\n\"\"\" % default_format\r\n return arg_doc.strip()",
"def command_help(self, command):\n self.commands[command].command_help()",
"def rpc_help(self, cmd: str = None) -> str:\n if cmd:\n return self._call_command([\"help\", cmd])\n return self._call_command([\"help\"])",
"def do_command_help(self, command):\n summary = self.base.commands[command].get_summary()\n usage = self.get_command_usage(command)\n description = self.base.commands[command].get_description()\n sys.stdout.write('%s\\n%s' % (summary, usage))\n if description != None:\n sys.stdout.write('Arguments Description:\\n%s\\n' %\n (description, ))",
"def get_usage_command(self):\n return textwrap.fill(self.expand_prog_name(\"Type '%prog help' for usage information.\"), 78)",
"def get_help(self):\n helpstr = \"\"\n helpstr += self.get_usage()\n helpstr += \"\\n\"\n helpstr += textwrap.fill(self.expand_prog_name(\"Type '%prog help <subcommand>' for help on a specific subcommand.\"), 78)\n helpstr += \"\\n\"\n helpstr += textwrap.fill(self.expand_prog_name(\"Type '%prog --version' to see the program version.\"), 78)\n helpstr += \"\\n\"\n helpstr += textwrap.fill(self.expand_prog_name(\"Type '%prog --verbose-load' to see the packages and plug-ins detected, and if plug-ins are successfully loaded.\"), 78)\n helpstr += \"\\n\\n\"\n\n helpstr += textwrap.fill(\"Subcommands consist of built-in subcommands and subcommands provided by installed plug-ins.\", 78)\n helpstr += \"\\n\\n\"\n\n helpstr += \"Available subcommands:\\n\"\n helpstr += self.sbtools.get_subcommands()\n\n return helpstr",
"def get_usage_command(self):\n return textwrap.fill(self.sbtools.parser.expand_prog_name(\"Type '%prog help %s' for usage.\") % (self.tool.get_command()), 78)",
"def get_command_help_message(\n project_dictionary: Dictionaries, command_name: str\n ) -> str:\n command_template = CommandSendCommand.get_command_template(\n project_dictionary, command_name\n )\n return misc_utils.get_cmd_template_string(command_template)",
"def help_command(bot: Phial) -> str:\n help_text = cast(str, bot.config.get(\"baseHelpText\", \"\"))\n if help_text:\n help_text += \"\\n\"\n for command in bot.commands:\n if command.hide_from_help_command:\n continue\n command_doc = command.help_text\n if not command_doc:\n # If no help text default to blank string\n command_doc = \"\"\n command_help_text = parse_help_text(command_doc)\n help_text += \"*{0}* - {1}\\n\".format(command.pattern_string, command_help_text)\n return help_text",
"def help(self):\n help = ''\n cmds = [(x, y) for x, y in Commands.__dict__.iteritems()]\n cmds.sort(key=lambda x: x[0])\n for name, member in cmds:\n if name.startswith('cmd_') and callable(member):\n help += ' %s\\n' % ' '.join([name[4:]] +\n ['<%s>' % x for x in\n inspect.getargspec(member).args[1:]])\n if member.__doc__:\n help += ' %s\\n' % member.__doc__.splitlines()[0]\n return 'Available commands:\\n%s' % help",
"def help(cmd, cmdArgs):\n global commandDict\n retInfo = []\n if len(cmdArgs) > 0:\n #return help on a single function\n if cmdArgs[0] in commandDict.keys():\n return commandDict[cmdArgs[0]].__doc__\n\n #else, return general info\n retInfo = ['pypeople: A command line tool for vCard management',\n 'Version:' + __version__,\n 'Available Commands:']\n #fill in more info here\n for cmdName in commandDict.keys():\n cmdFunc = commandDict[cmdName]\n cmdDoc = str(cmdName) + ': ' + str(cmdFunc.__doc__) if cmdFunc.__doc__ is not None else 'Undocumented Function'\n retInfo.append('\\t' + cmdDoc)\n\n return '\\n'.join(retInfo)",
"def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))",
"def do_print_help(parser):\n string_io = StringIO()\n parser.print_help(file=string_io)\n return string_io.getvalue()",
"def make_help_cmd(cmd, docstring):\n def help_cmd(message=docstring, cmd=cmd):\n print('=' * 15)\n print('\\nHelp for command %s:\\n' % (cmd,))\n print(message.strip())\n print('')\n print('=' * 15)\n print('')\n\n return help_cmd",
"async def _help(ctx, *, command_name: str=None):\n if command_name:\n command = bot.get_command(command_name)\n if not command:\n return await ctx.send(\"No such command!\")\n return await ctx.send(f\"```\\n{ctx.prefix}{command.name} {command.signature}\\n\\n{command.help or 'Missing description'}```\")\n description = []\n for name, cog in bot.cogs.items():\n entries = [\" - \".join([cmd.name, cmd.short_doc or \"Missing description\"]) for cmd in cog.get_commands() if await _can_run(cmd, ctx) and not cmd.hidden]\n if entries:\n description.append(f\"**{name}**:\")\n description.append(\"• \" + \"\\n• \".join(entries))\n await ctx.send(embed=discord.Embed(description=\"\\n\".join(description), color=ctx.me.color))",
"async def help(ctx, command:str=None):\n if command == None:\n embed = assemble_embed(\n title=\"Looking for help?\",\n desc=(\"Hey there, I'm a resident bot of Scioly.org!\\n\\n\" +\n \"On Discord, you can send me commands using `!` before the command name, and I will process it to help you! \" +\n \"For example, `!states`, `!events`, and `!fish` are all valid commands that can be used!\\n\\n\" +\n \"If you want to see some commands that you can use on me, just type `!list`! \" +\n \"If you need more help, please feel free to reach out to a staff member!\")\n )\n return await ctx.send(embed=embed)\n hlp = await get_help(ctx, command)\n await ctx.send(embed=hlp)",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"async def help(self, ctx, *, command_name: str=None):\n bot_prefix = '@Randy '\n # Shortcut to command search\n if command_name is not None:\n return await ctx.invoke(self.cmd('help command'), cmd_name=command_name)\n\n em = discord.Embed(title='Help',\n description='**Permissions:** The permissions required to function :-\\n'\n '`Send Messages`, `Manage Messages`, `Embed Links`\\n'\n '--\\nTo get help or more information on a specific command, use:\\n'\n '`{bot_prefix}help <command name>`\\n'\n '--\\nRead my messy code [here](http://github.com/xKynn/RandomRumble)'\n '--\\nIf you like my work and would like to help me, '\n 'Ko-Fi/Paypal: [Link](https://ko-fi.com/D1D6EXXV)\\n',\n color=self.color)\n\n em.set_footer(text=\"Contact me at Demo#7645\")\n\n # This can't go in the init because help isn't loaded last & thus misses some commands\n em.add_field(name=\"Commands\", value=' • '+'\\n • '.join(f\"***{c.name}*** - {c.short_doc}\" for c in self.bot.commands if\n c.name not in ['pob', 'link', 'convert']))\n try:\n await ctx.send(embed=em)\n except:\n await ctx.send(\"`Embed Links` permission is required to see the help!\")",
"def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return",
"async def help_command(self, ctx, *, cmd_name: str=None):\n bot_prefix = '@Randy '\n # Get command object\n cmd_obj = self.cmd(cmd_name)\n\n # Handle no command found\n if cmd_obj is None:\n return await ctx.error(f'Command {cmd_name} not found')\n em = discord.Embed(title=cmd_obj.name, description=cmd_obj.help, color=self.color)\n\n # Input aliases and parameters to embed\n if cmd_obj.aliases:\n em.add_field(name='Aliases', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.aliases]))\n if cmd_obj.clean_params:\n em.add_field(name='Parameters', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.clean_params]))\n\n # Handle group commands\n if isinstance(cmd_obj, commands.core.Group):\n em.add_field(name='Group commands',\n value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.commands]),\n inline=False)\n\n # Add usage last\n em.add_field(name='Usage',\n value=f'```{bot_prefix}\\u200b{cmd_name} '\n f'{\" \".join([f\"<{x}>\" for x in cmd_obj.clean_params])}```',\n inline=False)\n\n await ctx.send(embed=em)",
"async def send_command_help(self, command):\n ctx = self.context\n embed = discord.Embed(title=command.name.upper(), description=command.description, color=discord.Color.green())\n alias = command.aliases\n if alias:\n embed.add_field(name=\"Aliases\", value=\", \".join(alias), inline=False)\n if command.usage != None:\n embed.add_field(name=\"How to use:\", value=f'`{command.usage}`')\n\n await ctx.reply(embed=embed)",
"def help(self):\n self.logger.debug(\"module.Module.help()\")\n return os.linesep.join([\"{}:\".format(self.name),\n self.helptext])",
"def try_command(commandName, commandOptsList):\n retString = help.__doc__ # default to help doc\n if commandName in commandDict.keys():\n retString = commandDict[commandName](commandName, commandOptsList)\n return retString",
"def HelpForCmd(self, name):\n canonical_name = self._cmd_alias_list.get(name)\n if not canonical_name:\n raise CmdNotFoundError('Command not found: \"%s\"' % name)\n cmd = self._cmd_list[canonical_name]\n if cmd.__doc__.strip():\n flags_help = ''\n cmd_flags = self._flag_values_by_cmd[canonical_name]\n if cmd_flags.RegisteredFlags():\n prefix = ' '\n flags_help += '%s\\nFlags for %s:\\n' % (prefix, name)\n flags_help += cmd_flags.GetHelp(prefix + ' ')\n flags_help = _DeleteSpecialFlagHelp(flags_help)\n flags_help += '\\n\\n'\n return cmd.__doc__ + flags_help\n else:\n raise AssertionError('No class docstring found for command %s' % name)",
"def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))",
"def _help():\n text = \"\"\"\n```\nUsage: @bot [command] (message)\n\n*Commands*:\n \n add Adds specified users to notify list | To add yourself use key 'myself'\n list Lists users on notify list\n remove Removes specified users from notify list | To remove yourself use key 'myself'\n help This help\n\n > Obs: All commands are optional\n\n*Examples*:\n\n @bot add myself @Fulano <= Will add yourself and @Fulano in list.\n @bot this a messge test <= Send 'this a messge test' to all list.\n @bot remove myself @Fulano <= Remove yourself and @fulano from list.\n\n```\n\"\"\"\n return text",
"def _help(self):\n self.onecmd('help')"
]
| [
"0.8413381",
"0.78988314",
"0.7859746",
"0.7836614",
"0.77694803",
"0.7740121",
"0.76692426",
"0.7644076",
"0.76437217",
"0.7596462",
"0.7582023",
"0.7548597",
"0.75207776",
"0.75028855",
"0.74989355",
"0.74042505",
"0.7392222",
"0.7373897",
"0.7336396",
"0.7305406",
"0.7274974",
"0.72239286",
"0.7222709",
"0.72144216",
"0.7203675",
"0.71967584",
"0.7195591",
"0.7184186",
"0.71588176",
"0.7155417"
]
| 0.8688453 | 0 |