| body (string, lengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, lengths 1 to 16.8k) | path (string, lengths 5 to 230) | name (string, lengths 1 to 96) | repository_name (string, lengths 7 to 89) | lang (1 class: python) | body_without_docstring (string, lengths 20 to 98.2k) |
|---|---|---|---|---|---|---|---|
@property
def is_global(self) -> bool:
'\n Indicates whether this metric should be reported globally or per-task.\n '
return False | -5,529,555,185,957,866,000 | Indicates whether this metric should be reported globally or per-task. | parlai/core/metrics.py | is_global | Totoola-Kehinde/ParlAI | python | @property
def is_global(self) -> bool:
'\n \n '
return False |
@property
def macro_average(self) -> bool:
'\n Indicates whether this metric should be macro-averaged when globally reported.\n '
return False | 7,077,317,411,745,468,000 | Indicates whether this metric should be macro-averaged when globally reported. | parlai/core/metrics.py | macro_average | Totoola-Kehinde/ParlAI | python | @property
def macro_average(self) -> bool:
'\n \n '
return False |
@abstractmethod
def value(self) -> float:
'\n Return the value of the metric as a float.\n '
pass | 2,648,553,321,153,724,400 | Return the value of the metric as a float. | parlai/core/metrics.py | value | Totoola-Kehinde/ParlAI | python | @abstractmethod
def value(self) -> float:
'\n \n '
pass |
def __sub__(self, other: Any) -> float:
'\n Used heavily for assertAlmostEqual.\n '
if (not isinstance(other, float)):
raise TypeError('Metrics.__sub__ is intentionally limited to floats.')
return (self.value() - other) | 3,335,831,958,817,604,600 | Used heavily for assertAlmostEqual. | parlai/core/metrics.py | __sub__ | Totoola-Kehinde/ParlAI | python | def __sub__(self, other: Any) -> float:
'\n \n '
if (not isinstance(other, float)):
raise TypeError('Metrics.__sub__ is intentionally limited to floats.')
return (self.value() - other) |
def __rsub__(self, other: Any) -> float:
'\n Used heavily for assertAlmostEqual.\n\n NOTE: This is not necessary in python 3.7+.\n '
if (not isinstance(other, float)):
raise TypeError('Metrics.__rsub__ is intentionally limited to floats.')
return (other - self.value()) | -2,963,852,906,706,468,000 | Used heavily for assertAlmostEqual.
NOTE: This is not necessary in python 3.7+. | parlai/core/metrics.py | __rsub__ | Totoola-Kehinde/ParlAI | python | def __rsub__(self, other: Any) -> float:
'\n Used heavily for assertAlmostEqual.\n\n NOTE: This is not necessary in python 3.7+.\n '
if (not isinstance(other, float)):
raise TypeError('Metrics.__rsub__ is intentionally limited to floats.')
return (other - self.value()) |
@classmethod
def many(cls, *objs: List[TVector]) -> List['Metric']:
'\n Construct many of a Metric from the base parts.\n\n Useful if you separately compute numerators and denominators, etc.\n '
lengths = [len(o) for o in objs]
if (len(set(lengths)) != 1):
raise IndexError(f'Uneven {cls.__name__} constructions: {lengths}')
return [cls(*items) for items in zip(*objs)] | -5,461,492,413,293,750,000 | Construct many of a Metric from the base parts.
Useful if you separately compute numerators and denominators, etc. | parlai/core/metrics.py | many | Totoola-Kehinde/ParlAI | python | @classmethod
def many(cls, *objs: List[TVector]) -> List['Metric']:
'\n Construct many of a Metric from the base parts.\n\n Useful if you separately compute numerators and denominators, etc.\n '
lengths = [len(o) for o in objs]
if (len(set(lengths)) != 1):
raise IndexError(f'Uneven {cls.__name__} constructions: {lengths}')
return [cls(*items) for items in zip(*objs)] |
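A sketch of how `many` is meant to be used: per-example numerators and denominators computed as separate vectors get zipped into one metric object per example. The `AverageMetric`-style class below is a hypothetical stand-in for ParlAI's real subclasses, not the actual implementation.

```python
from typing import List

class AverageMetric:
    """Hypothetical minimal Metric subclass: value() = numer / denom."""

    def __init__(self, numer: float, denom: float = 1.0):
        self._numer = numer
        self._denom = denom

    def value(self) -> float:
        return self._numer / self._denom if self._denom != 0 else 0.0

    @classmethod
    def many(cls, *objs) -> List['AverageMetric']:
        # Same logic as above: all vectors must be equally long.
        lengths = [len(o) for o in objs]
        if len(set(lengths)) != 1:
            raise IndexError(f'Uneven {cls.__name__} constructions: {lengths}')
        return [cls(*items) for items in zip(*objs)]

# Per-example numerators and denominators become one metric per example.
correct = [3, 1, 4]  # e.g. tokens matched in each example
total = [4, 2, 5]    # e.g. tokens predicted in each example
metrics = AverageMetric.many(correct, total)
print([round(m.value(), 2) for m in metrics])  # [0.75, 0.5, 0.8]
```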
@property
def macro_average(self) -> bool:
'\n Indicates whether this metric should be macro-averaged when globally reported.\n '
return True | -2,759,622,566,340,719,000 | Indicates whether this metric should be macro-averaged when globally reported. | parlai/core/metrics.py | macro_average | Totoola-Kehinde/ParlAI | python | @property
def macro_average(self) -> bool:
'\n \n '
return True |
@staticmethod
def _prec_recall_f1_score(pred_items, gold_items):
'\n Compute precision, recall and f1 given a set of gold and prediction items.\n\n :param pred_items: iterable of predicted values\n :param gold_items: iterable of gold values\n\n :return: tuple (p, r, f1) for precision, recall, f1\n '
common = (Counter(gold_items) & Counter(pred_items))
num_same = sum(common.values())
if (num_same == 0):
return (0, 0, 0)
precision = ((1.0 * num_same) / len(pred_items))
recall = ((1.0 * num_same) / len(gold_items))
f1 = (((2 * precision) * recall) / (precision + recall))
return (precision, recall, f1) | -1,778,421,382,323,034,600 | Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1 | parlai/core/metrics.py | _prec_recall_f1_score | Totoola-Kehinde/ParlAI | python | @staticmethod
def _prec_recall_f1_score(pred_items, gold_items):
'\n Compute precision, recall and f1 given a set of gold and prediction items.\n\n :param pred_items: iterable of predicted values\n :param gold_items: iterable of gold values\n\n :return: tuple (p, r, f1) for precision, recall, f1\n '
common = (Counter(gold_items) & Counter(pred_items))
num_same = sum(common.values())
if (num_same == 0):
return (0, 0, 0)
precision = ((1.0 * num_same) / len(pred_items))
recall = ((1.0 * num_same) / len(gold_items))
f1 = (((2 * precision) * recall) / (precision + recall))
return (precision, recall, f1) |
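A worked example of the bag-of-token overlap above, using the same `Counter` intersection; the token lists are invented for illustration.

```python
from collections import Counter

pred_items = ['the', 'cat', 'sat']
gold_items = ['the', 'cat', 'sat', 'down']

common = Counter(gold_items) & Counter(pred_items)  # multiset intersection
num_same = sum(common.values())                     # 3 overlapping tokens
precision = 1.0 * num_same / len(pred_items)        # 3/3 = 1.0
recall = 1.0 * num_same / len(gold_items)           # 3/4 = 0.75
f1 = (2 * precision * recall) / (precision + recall)
print(precision, recall, round(f1, 3))              # 1.0 0.75 0.857
```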
@staticmethod
def compute(guess: str, answers: List[str], k: int=4) -> Optional['BleuMetric']:
'\n Compute approximate BLEU score between guess and a set of answers.\n '
if (nltkbleu is None):
return None
weights = [(1 / k) for _ in range(k)]
score = nltkbleu.sentence_bleu([normalize_answer(a).split(' ') for a in answers], normalize_answer(guess).split(' '), smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1, weights=weights)
return BleuMetric(score) | -7,477,616,927,497,190,000 | Compute approximate BLEU score between guess and a set of answers. | parlai/core/metrics.py | compute | Totoola-Kehinde/ParlAI | python | @staticmethod
def compute(guess: str, answers: List[str], k: int=4) -> Optional['BleuMetric']:
'\n \n '
if (nltkbleu is None):
return None
weights = [(1 / k) for _ in range(k)]
score = nltkbleu.sentence_bleu([normalize_answer(a).split(' ') for a in answers], normalize_answer(guess).split(' '), smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1, weights=weights)
return BleuMetric(score) |
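The computation above leans on NLTK's `sentence_bleu`; here is a minimal standalone sketch of the same smoothed BLEU-4 call, with made-up strings and a trivial lowercase/split normalizer standing in for ParlAI's `normalize_answer`.

```python
from nltk.translate import bleu_score as nltkbleu

def normalize(text: str):
    # Stand-in for ParlAI's normalize_answer: lowercase + whitespace split.
    return text.lower().split(' ')

answers = ['the cat sat on the mat']
guess = 'a cat sat on the mat'
k = 4
score = nltkbleu.sentence_bleu(
    [normalize(a) for a in answers],  # one token list per reference
    normalize(guess),                 # hypothesis tokens
    weights=[1 / k for _ in range(k)],
    smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
)
print(round(score, 4))
```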
@staticmethod
def compute_many(guess: torch.Tensor, answers: torch.Tensor, pad_idx, end_idx, unk_idx):
'\n Return BLEU-1..4 using fairseq and tokens.\n '
if (fairseqbleu is None):
return None
scorer = fairseqbleu.Scorer(pad_idx, end_idx, unk_idx)
answers = answers.cpu().int()
guess = guess.cpu().int()
scorer.add(answers, guess)
return [FairseqBleuMetric((scorer.score(i) / 100.0)) for i in range(1, 5)] | -7,551,354,395,805,417,000 | Return BLEU-1..4 using fairseq and tokens. | parlai/core/metrics.py | compute_many | Totoola-Kehinde/ParlAI | python | @staticmethod
def compute_many(guess: torch.Tensor, answers: torch.Tensor, pad_idx, end_idx, unk_idx):
'\n \n '
if (fairseqbleu is None):
return None
scorer = fairseqbleu.Scorer(pad_idx, end_idx, unk_idx)
answers = answers.cpu().int()
guess = guess.cpu().int()
scorer.add(answers, guess)
return [FairseqBleuMetric((scorer.score(i) / 100.0)) for i in range(1, 5)] |
@staticmethod
def compute_many(guess: str, answers: List[str]) -> Tuple[(Optional['RougeMetric'], Optional['RougeMetric'], Optional['RougeMetric'])]:
'\n Compute ROUGE score between guess and *any* answer.\n\n Done with compute_many due to increased efficiency.\n\n :return: (rouge-1, rouge-2, rouge-L)\n '
global rouge
if (rouge is None):
return (None, None, None)
if (RougeMetric._evaluator is None):
RougeMetric._evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2)
try:
scores = [RougeMetric._evaluator.get_scores(normalize_answer(guess), normalize_answer(a)) for a in answers]
except LookupError:
warn_once('ROUGE requires nltk punkt tokenizer. Please run `python -c "import nltk; nltk.download(\'punkt\')"`')
return (None, None, None)
scores_rouge1 = max((score['rouge-1']['r'] for score in scores))
scores_rouge2 = max((score['rouge-2']['r'] for score in scores))
scores_rougeL = max((score['rouge-l']['r'] for score in scores))
return (RougeMetric(scores_rouge1), RougeMetric(scores_rouge2), RougeMetric(scores_rougeL)) | 8,505,124,168,108,570,000 | Compute ROUGE score between guess and *any* answer.
Done with compute_many due to increased efficiency.
:return: (rouge-1, rouge-2, rouge-L) | parlai/core/metrics.py | compute_many | Totoola-Kehinde/ParlAI | python | @staticmethod
def compute_many(guess: str, answers: List[str]) -> Tuple[(Optional['RougeMetric'], Optional['RougeMetric'], Optional['RougeMetric'])]:
'\n Compute ROUGE score between guess and *any* answer.\n\n Done with compute_many due to increased efficiency.\n\n :return: (rouge-1, rouge-2, rouge-L)\n '
global rouge
if (rouge is None):
return (None, None, None)
if (RougeMetric._evaluator is None):
RougeMetric._evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2)
try:
scores = [RougeMetric._evaluator.get_scores(normalize_answer(guess), normalize_answer(a)) for a in answers]
except LookupError:
warn_once('ROUGE requires nltk punkt tokenizer. Please run `python -c "import nltk; nltk.download(\'punkt\')"`')
return (None, None, None)
scores_rouge1 = max((score['rouge-1']['r'] for score in scores))
scores_rouge2 = max((score['rouge-2']['r'] for score in scores))
scores_rougeL = max((score['rouge-l']['r'] for score in scores))
return (RougeMetric(scores_rouge1), RougeMetric(scores_rouge2), RougeMetric(scores_rougeL)) |
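The `rouge` import here is the py-rouge package, whose `get_scores` returns nested dicts of recall/precision/F per metric; a hedged sketch of the same evaluator setup (strings invented, and assuming py-rouge's single-string API):

```python
import rouge  # the py-rouge package

evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2)
scores = evaluator.get_scores('the cat sat on the mat', 'a cat sat on a mat')
# Each entry carries recall 'r', precision 'p', and F-score 'f'.
for key in ('rouge-1', 'rouge-2', 'rouge-l'):
    print(key, round(scores[key]['r'], 3))
```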
def add(self, key: str, value: Optional[Metric]) -> None:
'\n Record an accumulation to a metric.\n '
if (self._threadsafe and self._worker):
self._buffer[key] = (self._buffer.get(key) + value)
else:
self._data[key] = (self._data.get(key) + value) | -6,747,948,967,818,414,000 | Record an accumulation to a metric. | parlai/core/metrics.py | add | Totoola-Kehinde/ParlAI | python | def add(self, key: str, value: Optional[Metric]) -> None:
'\n \n '
if (self._threadsafe and self._worker):
self._buffer[key] = (self._buffer.get(key) + value)
else:
self._data[key] = (self._data.get(key) + value) |
def flush(self):
'\n Clear the local buffer and push it on.\n '
if (self._threadsafe and self._buffer):
self._queue.put(self._buffer)
self._buffer.clear() | 5,141,311,961,219,108,000 | Clear the local buffer and push it on. | parlai/core/metrics.py | flush | Totoola-Kehinde/ParlAI | python | def flush(self):
'\n \n '
if (self._threadsafe and self._buffer):
self._queue.put(self._buffer)
self._buffer.clear() |
def report(self):
'\n Report the metrics over all data seen so far.\n '
self.sync()
return {k: v for (k, v) in self._data.items()} | 5,447,521,010,190,575,000 | Report the metrics over all data seen so far. | parlai/core/metrics.py | report | Totoola-Kehinde/ParlAI | python | def report(self):
'\n \n '
self.sync()
return {k: v for (k, v) in self._data.items()} |
def sync(self):
'\n Process all items on the queue to ensure it is up to date.\n '
if self._worker:
self.flush()
elif (self._threadsafe and (not self._worker)):
for buffer_ in self._drain_queue():
for (key, value) in buffer_.items():
self._data[key] = (self._data.get(key) + value) | 6,433,977,595,441,718,000 | Process all items on the queue to ensure it is up to date. | parlai/core/metrics.py | sync | Totoola-Kehinde/ParlAI | python | def sync(self):
'\n \n '
if self._worker:
self.flush()
elif (self._threadsafe and (not self._worker)):
for buffer_ in self._drain_queue():
for (key, value) in buffer_.items():
self._data[key] = (self._data.get(key) + value) |
def _drain_queue(self):
'\n Drain the queue, yielding all items in it.\n '
while (not self._queue.empty()):
try:
(yield self._queue.get())
except queue.Empty:
break | -5,236,310,757,927,010,000 | Drain the queue, yielding all items in it. | parlai/core/metrics.py | _drain_queue | Totoola-Kehinde/ParlAI | python | def _drain_queue(self):
'\n \n '
while (not self._queue.empty()):
try:
(yield self._queue.get())
except queue.Empty:
break |
def clear(self):
'\n Clear all the metrics.\n '
if self._worker:
self._buffer.clear()
elif (self._threadsafe and (not self._worker)):
for _ in self._drain_queue():
pass
if self._data:
self._data.clear() | -1,541,648,514,536,968,000 | Clear all the metrics. | parlai/core/metrics.py | clear | Totoola-Kehinde/ParlAI | python | def clear(self):
'\n \n '
if self._worker:
self._buffer.clear()
elif (self._threadsafe and (not self._worker)):
for _ in self._drain_queue():
pass
if self._data:
self._data.clear() |
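Taken together, `add`/`flush`/`sync`/`_drain_queue` implement a buffer-and-merge pattern: workers accumulate locally, push the buffer onto a shared queue, and the main process drains and merges. A minimal stdlib sketch of that pattern with plain ints in place of `Metric` objects (all names invented):

```python
import multiprocessing as mp
import queue

def worker(q: mp.Queue) -> None:
    buffer = {}
    for _ in range(5):
        buffer['exs'] = buffer.get('exs', 0) + 1  # local accumulation (add)
    q.put(buffer)  # push the whole buffer at once (flush)

def drain(q: mp.Queue):
    # Mirror of _drain_queue: yield until the queue is exhausted.
    while True:
        try:
            yield q.get_nowait()
        except queue.Empty:
            break

if __name__ == '__main__':
    q = mp.Queue()
    procs = [mp.Process(target=worker, args=(q,)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    data = {}
    for buf in drain(q):  # mirror of sync(): merge every worker buffer
        for key, value in buf.items():
            data[key] = data.get(key, 0) + value
    print(data)  # {'exs': 15}
```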
@staticmethod
def _infer_metrics(cli_arg: str) -> Set[str]:
'\n Parse the CLI metric into a list of metrics we wish to compute.\n '
col: Set[str] = set()
names = cli_arg.split(',')
for n in names:
if (n == 'default'):
col |= DEFAULT_METRICS
elif (n == 'rouge'):
col |= ROUGE_METRICS
elif (n == 'bleu'):
col |= BLEU_METRICS
elif (n == 'all'):
col |= ALL_METRICS
else:
col.add(n)
return col | -7,333,133,530,925,577,000 | Parse the CLI metric into a list of metrics we wish to compute. | parlai/core/metrics.py | _infer_metrics | Totoola-Kehinde/ParlAI | python | @staticmethod
def _infer_metrics(cli_arg: str) -> Set[str]:
'\n \n '
col: Set[str] = set()
names = cli_arg.split(',')
for n in names:
if (n == 'default'):
col |= DEFAULT_METRICS
elif (n == 'rouge'):
col |= ROUGE_METRICS
elif (n == 'bleu'):
col |= BLEU_METRICS
elif (n == 'all'):
col |= ALL_METRICS
else:
col.add(n)
return col |
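In effect the parser maps comma-separated names onto unions of predefined metric sets; a condensed illustration (set contents are illustrative, since the real constants are defined elsewhere in the module):

```python
DEFAULT_METRICS = {'accuracy', 'f1', 'bleu-4'}   # illustrative contents
ROUGE_METRICS = {'rouge-1', 'rouge-2', 'rouge-L'}

def infer_metrics(cli_arg: str) -> set:
    col = set()
    for n in cli_arg.split(','):
        if n == 'default':
            col |= DEFAULT_METRICS
        elif n == 'rouge':
            col |= ROUGE_METRICS
        else:
            col.add(n)  # unknown names pass through verbatim
    return col

print(sorted(infer_metrics('default,rouge,ppl')))
```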
def evaluate_response(self, observation: Message, labels: List[str]) -> None:
'\n Compute all required text-based metrics based on an observation and labels.\n '
prediction = observation.get('text', None)
self.add('exs', SumMetric(1))
if (prediction is not None):
self.add('accuracy', ExactMatchMetric.compute(prediction, labels))
self.add('f1', F1Metric.compute(prediction, labels))
for k in range(1, 5):
if (f'bleu-{k}' in self._metrics_list):
self.add(f'bleu-{k}', BleuMetric.compute(prediction, labels, k))
if (self._metrics_list & ROUGE_METRICS):
(r1, r2, rL) = RougeMetric.compute_many(prediction, labels)
if ('rouge-1' in self._metrics_list):
self.add('rouge_1', r1)
if ('rouge-2' in self._metrics_list):
self.add('rouge_2', r2)
if ('rouge-L' in self._metrics_list):
self.add('rouge_L', rL)
self._update_ranking_metrics(observation, labels)
if ('metrics' in observation):
for (uk, v) in observation['metrics'].items():
if (uk in ALL_METRICS):
uk = f'USER_{uk}'
assert isinstance(uk, str), type(uk)
if (not isinstance(v, Metric)):
warn_once(f'Metric {uk} is assumed to be averaged per example.')
v = AverageMetric(v)
assert isinstance(v, Metric)
self.add(uk, v)
self.flush() | -6,620,368,236,673,050,000 | Compute all required text-based metrics based on an observation and labels. | parlai/core/metrics.py | evaluate_response | Totoola-Kehinde/ParlAI | python | def evaluate_response(self, observation: Message, labels: List[str]) -> None:
'\n \n '
prediction = observation.get('text', None)
self.add('exs', SumMetric(1))
if (prediction is not None):
self.add('accuracy', ExactMatchMetric.compute(prediction, labels))
self.add('f1', F1Metric.compute(prediction, labels))
for k in range(1, 5):
if (f'bleu-{k}' in self._metrics_list):
self.add(f'bleu-{k}', BleuMetric.compute(prediction, labels, k))
if (self._metrics_list & ROUGE_METRICS):
(r1, r2, rL) = RougeMetric.compute_many(prediction, labels)
if ('rouge-1' in self._metrics_list):
self.add('rouge_1', r1)
if ('rouge-2' in self._metrics_list):
self.add('rouge_2', r2)
if ('rouge-L' in self._metrics_list):
self.add('rouge_L', rL)
self._update_ranking_metrics(observation, labels)
if ('metrics' in observation):
for (uk, v) in observation['metrics'].items():
if (uk in ALL_METRICS):
uk = f'USER_{uk}'
assert isinstance(uk, str), type(uk)
if (not isinstance(v, Metric)):
warn_once(f'Metric {uk} is assumed to be averaged per example.')
v = AverageMetric(v)
assert isinstance(v, Metric)
self.add(uk, v)
self.flush() |
def deserialize(self, f, *, with_time=True):
'Deserialize from addrv1 format (pre-BIP155)'
if with_time:
self.time = struct.unpack('<I', f.read(4))[0]
self.nServices = struct.unpack('<Q', f.read(8))[0]
f.read(12)
self.net = self.NET_IPV4
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack('>H', f.read(2))[0] | -4,551,729,411,307,620,000 | Deserialize from addrv1 format (pre-BIP155) | test/functional/test_framework/messages.py | deserialize | UFO-ETL/ufo | python | def deserialize(self, f, *, with_time=True):
if with_time:
self.time = struct.unpack('<I', f.read(4))[0]
self.nServices = struct.unpack('<Q', f.read(8))[0]
f.read(12)
self.net = self.NET_IPV4
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack('>H', f.read(2))[0] |
def serialize(self, *, with_time=True):
'Serialize in addrv1 format (pre-BIP155)'
assert (self.net == self.NET_IPV4)
r = b''
if with_time:
r += struct.pack('<I', self.time)
r += struct.pack('<Q', self.nServices)
r += ((b'\x00' * 10) + (b'\xff' * 2))
r += socket.inet_aton(self.ip)
r += struct.pack('>H', self.port)
return r | -2,852,409,591,392,299,000 | Serialize in addrv1 format (pre-BIP155) | test/functional/test_framework/messages.py | serialize | UFO-ETL/ufo | python | def serialize(self, *, with_time=True):
assert (self.net == self.NET_IPV4)
r = b''
if with_time:
r += struct.pack('<I', self.time)
r += struct.pack('<Q', self.nServices)
r += ((b'\x00' * 10) + (b'\xff' * 2))
r += socket.inet_aton(self.ip)
r += struct.pack('>H', self.port)
return r |
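A round-trip sketch of the addrv1 wire layout used above: 4-byte little-endian time, 8-byte services, a 12-byte IPv4-mapped IPv6 prefix, the 4 address bytes, and a big-endian port. Stdlib only; the values are made up.

```python
import io
import socket
import struct

def serialize_addrv1(time_, services, ip, port):
    r = struct.pack('<I', time_)           # 4-byte little-endian time
    r += struct.pack('<Q', services)       # 8-byte service bits
    r += b'\x00' * 10 + b'\xff' * 2        # IPv4-mapped IPv6 prefix
    r += socket.inet_aton(ip)              # 4 address bytes
    r += struct.pack('>H', port)           # port in network byte order
    return r

def deserialize_addrv1(f):
    time_ = struct.unpack('<I', f.read(4))[0]
    services = struct.unpack('<Q', f.read(8))[0]
    f.read(12)                             # skip the prefix
    ip = socket.inet_ntoa(f.read(4))
    port = struct.unpack('>H', f.read(2))[0]
    return time_, services, ip, port

raw = serialize_addrv1(1700000000, 1, '127.0.0.1', 8333)
print(deserialize_addrv1(io.BytesIO(raw)))  # (1700000000, 1, '127.0.0.1', 8333)
```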
def deserialize_v2(self, f):
'Deserialize from addrv2 format (BIP155)'
self.time = struct.unpack('<I', f.read(4))[0]
self.nServices = deser_compact_size(f)
self.net = struct.unpack('B', f.read(1))[0]
assert (self.net == self.NET_IPV4)
address_length = deser_compact_size(f)
assert (address_length == self.ADDRV2_ADDRESS_LENGTH[self.net])
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack('>H', f.read(2))[0] | 1,848,882,561,157,270,800 | Deserialize from addrv2 format (BIP155) | test/functional/test_framework/messages.py | deserialize_v2 | UFO-ETL/ufo | python | def deserialize_v2(self, f):
self.time = struct.unpack('<I', f.read(4))[0]
self.nServices = deser_compact_size(f)
self.net = struct.unpack('B', f.read(1))[0]
assert (self.net == self.NET_IPV4)
address_length = deser_compact_size(f)
assert (address_length == self.ADDRV2_ADDRESS_LENGTH[self.net])
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack('>H', f.read(2))[0] |
def serialize_v2(self):
'Serialize in addrv2 format (BIP155)'
assert (self.net == self.NET_IPV4)
r = b''
r += struct.pack('<I', self.time)
r += ser_compact_size(self.nServices)
r += struct.pack('B', self.net)
r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net])
r += socket.inet_aton(self.ip)
r += struct.pack('>H', self.port)
return r | 8,858,076,283,761,676,000 | Serialize in addrv2 format (BIP155) | test/functional/test_framework/messages.py | serialize_v2 | UFO-ETL/ufo | python | def serialize_v2(self):
assert (self.net == self.NET_IPV4)
r = b''
r += struct.pack('<I', self.time)
r += ser_compact_size(self.nServices)
r += struct.pack('B', self.net)
r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net])
r += socket.inet_aton(self.ip)
r += struct.pack('>H', self.port)
return r |
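`ser_compact_size`/`deser_compact_size` in the addrv2 code are the test framework's helpers for Bitcoin's CompactSize integers. A self-contained sketch of that encoding (one byte below 253, otherwise a 0xFD/0xFE/0xFF marker plus a 2-, 4-, or 8-byte little-endian integer):

```python
import io
import struct

def ser_compact_size(n: int) -> bytes:
    if n < 253:
        return struct.pack('B', n)
    elif n <= 0xFFFF:
        return b'\xfd' + struct.pack('<H', n)
    elif n <= 0xFFFFFFFF:
        return b'\xfe' + struct.pack('<I', n)
    return b'\xff' + struct.pack('<Q', n)

def deser_compact_size(f) -> int:
    marker = struct.unpack('B', f.read(1))[0]
    if marker == 253:
        return struct.unpack('<H', f.read(2))[0]
    elif marker == 254:
        return struct.unpack('<I', f.read(4))[0]
    elif marker == 255:
        return struct.unpack('<Q', f.read(8))[0]
    return marker

for n in (0, 252, 253, 65535, 65536, 2**32):
    raw = ser_compact_size(n)
    assert deser_compact_size(io.BytesIO(raw)) == n
    print(n, raw.hex())
```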
def getFile(url, destdir, filename='', quiet=None) -> bool:
"download file from 'url' into 'destdir'"
if (quiet is None):
quiet = CraftCore.settings.getboolean('ContinuousIntegration', 'Enabled', False)
CraftCore.log.debug(('getFile called. url: %s' % url))
if (url == ''):
CraftCore.log.error('fetch: no url given')
return False
pUrl = urllib.parse.urlparse(url)
if (not filename):
filename = os.path.basename(pUrl.path)
utils.createDir(destdir)
if (pUrl.scheme == 's3'):
return s3File(url, destdir, filename)
elif (pUrl.scheme == 'minio'):
return minioGet((pUrl.netloc + pUrl.path), destdir, filename)
if ((not CraftCore.compiler.isWindows) or os.path.exists(os.path.join(CraftCore.standardDirs.etcDir(), 'cacert.pem'))):
if (not CraftCore.settings.getboolean('General', 'NoWget')):
if CraftCore.cache.findApplication('wget'):
return wgetFile(url, destdir, filename, quiet)
if CraftCore.cache.findApplication('curl'):
return curlFile(url, destdir, filename, quiet)
if os.path.exists(os.path.join(destdir, filename)):
return True
powershell = CraftCore.cache.findApplication('powershell')
if powershell:
filename = os.path.join(destdir, filename)
return utils.system([powershell, '-NoProfile', '-ExecutionPolicy', 'ByPass', '-Command', f'[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; (new-object net.webclient).DownloadFile("{url}", "{filename}")'])
else:
def dlProgress(count, blockSize, totalSize):
if (totalSize != (- 1)):
percent = int((((count * blockSize) * 100) / totalSize))
utils.printProgress(percent)
else:
sys.stdout.write(('\r%s bytes downloaded' % (count * blockSize)))
sys.stdout.flush()
try:
urllib.request.urlretrieve(url, filename=os.path.join(destdir, filename), reporthook=(dlProgress if (CraftCore.debug.verbose() >= 0) else None))
except Exception as e:
CraftCore.log.warning(e)
return False
if (CraftCore.debug.verbose() >= 0):
sys.stdout.write('\n')
sys.stdout.flush()
return True | -6,489,146,965,634,421,000 | download file from 'url' into 'destdir' | bin/Utils/GetFiles.py | getFile | C-EO/craft | python | def getFile(url, destdir, filename='', quiet=None) -> bool:
if (quiet is None):
quiet = CraftCore.settings.getboolean('ContinuousIntegration', 'Enabled', False)
CraftCore.log.debug(('getFile called. url: %s' % url))
if (url == ''):
CraftCore.log.error('fetch: no url given')
return False
pUrl = urllib.parse.urlparse(url)
if (not filename):
filename = os.path.basename(pUrl.path)
utils.createDir(destdir)
if (pUrl.scheme == 's3'):
return s3File(url, destdir, filename)
elif (pUrl.scheme == 'minio'):
return minioGet((pUrl.netloc + pUrl.path), destdir, filename)
if ((not CraftCore.compiler.isWindows) or os.path.exists(os.path.join(CraftCore.standardDirs.etcDir(), 'cacert.pem'))):
if (not CraftCore.settings.getboolean('General', 'NoWget')):
if CraftCore.cache.findApplication('wget'):
return wgetFile(url, destdir, filename, quiet)
if CraftCore.cache.findApplication('curl'):
return curlFile(url, destdir, filename, quiet)
if os.path.exists(os.path.join(destdir, filename)):
return True
powershell = CraftCore.cache.findApplication('powershell')
if powershell:
filename = os.path.join(destdir, filename)
return utils.system([powershell, '-NoProfile', '-ExecutionPolicy', 'ByPass', '-Command', f'[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; (new-object net.webclient).DownloadFile("{url}", "{filename}")'])
else:
def dlProgress(count, blockSize, totalSize):
if (totalSize != (- 1)):
percent = int((((count * blockSize) * 100) / totalSize))
utils.printProgress(percent)
else:
sys.stdout.write(('\r%s bytes downloaded' % (count * blockSize)))
sys.stdout.flush()
try:
urllib.request.urlretrieve(url, filename=os.path.join(destdir, filename), reporthook=(dlProgress if (CraftCore.debug.verbose() >= 0) else None))
except Exception as e:
CraftCore.log.warning(e)
return False
if (CraftCore.debug.verbose() >= 0):
sys.stdout.write('\n')
sys.stdout.flush()
return True |
def curlFile(url, destdir, filename, quiet):
"download file with curl from 'url' into 'destdir', if filename is given to the file specified"
curl = CraftCore.cache.findApplication('curl')
command = [curl, '-C', '-', '--retry', '10', '-L', '--ftp-ssl', '--fail']
cert = os.path.join(CraftCore.standardDirs.etcDir(), 'cacert.pem')
if os.path.exists(cert):
command += ['--cacert', cert]
command += ['--max-redirs', '50']
command += ['-o', os.path.join(destdir, filename)]
command += [url]
CraftCore.log.debug('curlfile called')
if (CraftCore.debug.verbose() < 1):
if quiet:
with io.StringIO() as tmp:
ciMode = CraftCore.settings.getboolean('ContinuousIntegration', 'Enabled', False)
if ciMode:
command += ['-v']
if (not utils.system(command, logCommand=ciMode, stdout=tmp, stderr=subprocess.STDOUT)):
CraftCore.log.warning(tmp.getvalue())
return False
if ciMode:
loc = re.findall('Host: ([^\\s]+)', tmp.getvalue())
if loc:
CraftCore.log.info(f'Downloaded from: {loc[(- 1)]}')
return True
elif CraftCore.cache.checkCommandOutputFor(curl, '--progress-bar'):
command += ['--progress-bar']
CraftCore.log.info(f'curl {url}')
return utils.system(command, displayProgress=True, logCommand=False, stderr=subprocess.STDOUT)
command += ['-v']
return utils.system(command) | -9,166,799,275,081,705,000 | download file with curl from 'url' into 'destdir'; if 'filename' is given, save to that file | bin/Utils/GetFiles.py | curlFile | C-EO/craft | python | def curlFile(url, destdir, filename, quiet):
curl = CraftCore.cache.findApplication('curl')
command = [curl, '-C', '-', '--retry', '10', '-L', '--ftp-ssl', '--fail']
cert = os.path.join(CraftCore.standardDirs.etcDir(), 'cacert.pem')
if os.path.exists(cert):
command += ['--cacert', cert]
command += ['--max-redirs', '50']
command += ['-o', os.path.join(destdir, filename)]
command += [url]
CraftCore.log.debug('curlfile called')
if (CraftCore.debug.verbose() < 1):
if quiet:
with io.StringIO() as tmp:
ciMode = CraftCore.settings.getboolean('ContinuousIntegration', 'Enabled', False)
if ciMode:
command += ['-v']
if (not utils.system(command, logCommand=ciMode, stdout=tmp, stderr=subprocess.STDOUT)):
CraftCore.log.warning(tmp.getvalue())
return False
if ciMode:
loc = re.findall('Host: ([^\\s]+)', tmp.getvalue())
if loc:
CraftCore.log.info(f'Downloaded from: {loc[(- 1)]}')
return True
elif CraftCore.cache.checkCommandOutputFor(curl, '--progress-bar'):
command += ['--progress-bar']
CraftCore.log.info(f'curl {url}')
return utils.system(command, displayProgress=True, logCommand=False, stderr=subprocess.STDOUT)
command += ['-v']
return utils.system(command) |
def wgetFile(url, destdir, filename, quiet):
"download file with wget from 'url' into 'destdir', if filename is given to the file specified"
wget = CraftCore.cache.findApplication('wget')
command = [wget, '-c', '-t', '10']
cert = os.path.join(CraftCore.standardDirs.etcDir(), 'cacert.pem')
if os.path.exists(cert):
command += ['--ca-certificate', cert]
command += ['--max-redirect', '50']
if CraftCore.settings.getboolean('General', 'EMERGE_NO_PASSIVE_FTP', False):
command += ['--no-passive-ftp']
if (not filename):
command += ['-P', destdir]
else:
command += ['-O', os.path.join(destdir, filename)]
command += [url]
if (CraftCore.debug.verbose() < 1):
if quiet:
with io.StringIO() as tmp:
ciMode = CraftCore.settings.getboolean('ContinuousIntegration', 'Enabled', False)
if (not utils.system(command, logCommand=ciMode, stdout=tmp, stderr=subprocess.STDOUT)):
CraftCore.log.warning(tmp.getvalue())
return False
if ciMode:
loc = re.findall('Location: ([^\\s]+)', tmp.getvalue())
if loc:
CraftCore.log.info(f'Downloaded from: {loc[(- 1)]}')
return True
elif CraftCore.cache.checkCommandOutputFor(wget, '--show-progress'):
command += ['-q', '--show-progress']
CraftCore.log.info(f'wget {url}')
return utils.system(command, displayProgress=True, logCommand=False, stderr=subprocess.STDOUT)
return utils.system(command) | -2,021,340,553,555,407,400 | download file with wget from 'url' into 'destdir'; if 'filename' is given, save to that file | bin/Utils/GetFiles.py | wgetFile | C-EO/craft | python | def wgetFile(url, destdir, filename, quiet):
wget = CraftCore.cache.findApplication('wget')
command = [wget, '-c', '-t', '10']
cert = os.path.join(CraftCore.standardDirs.etcDir(), 'cacert.pem')
if os.path.exists(cert):
command += ['--ca-certificate', cert]
command += ['--max-redirect', '50']
if CraftCore.settings.getboolean('General', 'EMERGE_NO_PASSIVE_FTP', False):
command += ['--no-passive-ftp']
if (not filename):
command += ['-P', destdir]
else:
command += ['-O', os.path.join(destdir, filename)]
command += [url]
if (CraftCore.debug.verbose() < 1):
if quiet:
with io.StringIO() as tmp:
ciMode = CraftCore.settings.getboolean('ContinuousIntegration', 'Enabled', False)
if (not utils.system(command, logCommand=ciMode, stdout=tmp, stderr=subprocess.STDOUT)):
CraftCore.log.warning(tmp.getvalue())
return False
if ciMode:
loc = re.findall('Location: ([^\\s]+)', tmp.getvalue())
if loc:
CraftCore.log.info(f'Downloaded from: {loc[(- 1)]}')
return True
elif CraftCore.cache.checkCommandOutputFor(wget, '--show-progress'):
command += ['-q', '--show-progress']
CraftCore.log.info(f'wget {url}')
return utils.system(command, displayProgress=True, logCommand=False, stderr=subprocess.STDOUT)
return utils.system(command) |
def pfba_gapfill(model, reaction_bag, obj=None, obj_lb=10.0, obj_constraint=False, iters=1, tasks=None, task_lb=0.05, add_exchanges=True, extracellular='e', cores=4):
'\n Function that utilizes iterations of pFBA solution with a universal reaction bag \n in order to gapfill a model.\n \n Parameters\n ----------\n model : cobra.Model\n Model to be gapfilled\n reaction_bag : cobra.Model\n Reaction bag reference to use during gapfilling\n obj : string\n Reaction ID for objective function in model to be gapfilled.\n obj_lb : float\n Lower bound for objective function\n obj_constraint : bool\n Sets objective as constraint which must be maximized\n tasks : list or None\n List of reaction IDs (strings) of metabolic tasks \n to set a minimum lower bound for\n task_lb : float\n Lower bound for any metabolic tasks\n iters : int\n Number of gapfilling rounds. Unique reactions from each round are \n saved and the union is added simultaneously to the model\n add_exchanges : bool\n Identifies extracellular metabolites added during gapfilling that\n are not associated with exchange reactions and creates them\n extracellular : string\n Label for extracellular compartment of model\n cores : int\n Number of processors to utilize during flux sampling\n '
start_time = time.time()
orig_rxn_ids = set([str(x.id) for x in model.reactions])
orig_cpd_ids = set([str(y.id) for y in model.metabolites])
univ_rxn_ids = set([str(z.id) for z in reaction_bag.reactions])
overlap_rxn_ids = univ_rxn_ids.intersection(orig_rxn_ids)
if (obj == None):
obj = get_objective(model)
else:
obj = obj
new_rxn_ids = set()
print('Creating universal model...')
with reaction_bag as universal:
for rxn in overlap_rxn_ids:
universal.reactions.get_by_id(rxn).remove_from_model()
if obj_constraint:
universal.add_reactions([model.reactions.get_by_id(obj)])
universal.objective = obj
orig_rxn_ids.remove(obj)
orig_rxns = []
for rxn in orig_rxn_ids:
orig_rxns.append(copy.deepcopy(model.reactions.get_by_id(rxn)))
else:
orig_rxns = list(copy.deepcopy(model.reactions))
add_pfba(universal)
universal.add_reactions(orig_rxns)
if (not obj_constraint):
universal.reactions.get_by_id(obj).lower_bound = obj_lb
if (tasks != None):
for task in tasks:
try:
universal.reactions.get_by_id(task).lower_bound = task_lb
except:
print((task + ' not found in model. Ignoring.'))
continue
print('Optimizing model with combined reactions...')
solution = universal.optimize()
if (iters > 1):
print('Generating flux sampling object...')
sutil.fix_objective_as_constraint(universal, fraction=0.99)
optgp_object = OptGPSampler(universal, processes=cores)
print((('Sampling ' + str(iters)) + ' flux distributions...'))
flux_samples = optgp_object.sample(iters)
rxns = list(flux_samples.columns)
for distribution in flux_samples.iterrows():
for flux in range(0, len(list(distribution[1]))):
if (abs(list(distribution[1])[flux]) > 1e-06):
new_rxn_ids |= set([rxns[flux]]).difference(orig_rxn_ids)
else:
rxns = list(solution.fluxes.index)
fluxes = list(solution.fluxes)
for flux in range(0, len(fluxes)):
if (abs(fluxes[flux]) > 1e-06):
new_rxn_ids |= set([rxns[flux]])
if (obj in new_rxn_ids):
new_rxn_ids.remove(obj)
for rxn in orig_rxn_ids:
try:
new_rxn_ids.remove(rxn)
except:
continue
print('Gapfilling model...')
new_rxns = copy.deepcopy([reaction_bag.reactions.get_by_id(rxn) for rxn in new_rxn_ids])
new_cpd_ids = set()
for rxn in new_rxns:
new_cpd_ids |= set([str(x.id) for x in list(rxn.metabolites)])
new_cpd_ids = new_cpd_ids.difference(orig_cpd_ids)
new_cpds = copy.deepcopy([reaction_bag.metabolites.get_by_id(cpd) for cpd in new_cpd_ids])
new_model = copy.deepcopy(model)
new_model.add_metabolites(new_cpds)
new_model.add_reactions(new_rxns)
if (add_exchanges == True):
new_exchanges = extend_exchanges(new_model, new_cpd_ids, extracellular)
if (len(new_exchanges) > 0):
new_rxn_ids |= new_exchanges
duration = int(round((time.time() - start_time)))
print((((((('Took ' + str(duration)) + ' seconds to gapfill ') + str(len(new_rxn_ids))) + ' reactions and ') + str(len(new_cpd_ids))) + ' metabolites.'))
new_obj_val = new_model.slim_optimize()
if (new_obj_val > 1e-06):
print((('Gapfilled model objective now carries flux (' + str(new_obj_val)) + ').'))
else:
print('Gapfilled model objective still does not carry flux.')
return new_model | 4,643,361,251,913,184,000 | Function that utilizes iterations of pFBA solution with a universal reaction bag
in order to gapfill a model.
Parameters
----------
model : cobra.Model
Model to be gapfilled
reaction_bag : cobra.Model
Reaction bag reference to use during gapfilling
obj : string
Reaction ID for objective function in model to be gapfilled.
obj_lb : float
Lower bound for objective function
obj_constraint : bool
Sets objective as constraint which must be maximized
tasks : list or None
List of reaction IDs (strings) of metabolic tasks
to set a minimum lower bound for
task_lb : float
Lower bound for any metabolic tasks
iters : int
Number of gapfilling rounds. Unique reactions from each round are
saved and the union is added simultaneously to the model
add_exchanges : bool
Identifies extracellular metabolites added during gapfilling that
are not associated with exchange reactions and creates them
extracellular : string
Label for extracellular compartment of model
cores : int
Number of processors to utilize during flux sampling | pfba_gapfiller.py | pfba_gapfill | csbl/CSBL-code-repo | python | def pfba_gapfill(model, reaction_bag, obj=None, obj_lb=10.0, obj_constraint=False, iters=1, tasks=None, task_lb=0.05, add_exchanges=True, extracellular='e', cores=4):
'\n Function that utilizes iterations of pFBA solution with a universal reaction bag \n in order to gapfill a model.\n \n Parameters\n ----------\n model : cobra.Model\n Model to be gapfilled\n reaction_bag : cobra.Model\n Reaction bag reference to use during gapfilling\n obj : string\n Reaction ID for objective function in model to be gapfilled.\n obj_lb : float\n Lower bound for objective function\n obj_constraint : bool\n Sets objective as constraint which must be maximized\n tasks : list or None\n List of reaction IDs (strings) of metabolic tasks \n to set a minimum lower bound for\n task_lb : float\n Lower bound for any metabolic tasks\n iters : int\n Number of gapfilling rounds. Unique reactions from each round are \n saved and the union is added simultaneously to the model\n add_exchanges : bool\n Identifies extracellular metabolites added during gapfilling that\n are not associated with exchange reactions and creates them\n extracellular : string\n Label for extracellular compartment of model\n cores : int\n Number of processors to utilize during flux sampling\n '
start_time = time.time()
orig_rxn_ids = set([str(x.id) for x in model.reactions])
orig_cpd_ids = set([str(y.id) for y in model.metabolites])
univ_rxn_ids = set([str(z.id) for z in reaction_bag.reactions])
overlap_rxn_ids = univ_rxn_ids.intersection(orig_rxn_ids)
if (obj == None):
obj = get_objective(model)
else:
obj = obj
new_rxn_ids = set()
print('Creating universal model...')
with reaction_bag as universal:
for rxn in overlap_rxn_ids:
universal.reactions.get_by_id(rxn).remove_from_model()
if obj_constraint:
universal.add_reactions([model.reactions.get_by_id(obj)])
universal.objective = obj
orig_rxn_ids.remove(obj)
orig_rxns = []
for rxn in orig_rxn_ids:
orig_rxns.append(copy.deepcopy(model.reactions.get_by_id(rxn)))
else:
orig_rxns = list(copy.deepcopy(model.reactions))
add_pfba(universal)
universal.add_reactions(orig_rxns)
if (not obj_constraint):
universal.reactions.get_by_id(obj).lower_bound = obj_lb
if (tasks != None):
for task in tasks:
try:
universal.reactions.get_by_id(task).lower_bound = task_lb
except:
print((task + ' not found in model. Ignoring.'))
continue
print('Optimizing model with combined reactions...')
solution = universal.optimize()
if (iters > 1):
print('Generating flux sampling object...')
sutil.fix_objective_as_constraint(universal, fraction=0.99)
optgp_object = OptGPSampler(universal, processes=cores)
print((('Sampling ' + str(iters)) + ' flux distributions...'))
flux_samples = optgp_object.sample(iters)
rxns = list(flux_samples.columns)
for distribution in flux_samples.iterrows():
for flux in range(0, len(list(distribution[1]))):
if (abs(list(distribution[1])[flux]) > 1e-06):
new_rxn_ids |= set([rxns[flux]]).difference(orig_rxn_ids)
else:
rxns = list(solution.fluxes.index)
fluxes = list(solution.fluxes)
for flux in range(0, len(fluxes)):
if (abs(fluxes[flux]) > 1e-06):
new_rxn_ids |= set([rxns[flux]])
if (obj in new_rxn_ids):
new_rxn_ids.remove(obj)
for rxn in orig_rxn_ids:
try:
new_rxn_ids.remove(rxn)
except:
continue
print('Gapfilling model...')
new_rxns = copy.deepcopy([reaction_bag.reactions.get_by_id(rxn) for rxn in new_rxn_ids])
new_cpd_ids = set()
for rxn in new_rxns:
new_cpd_ids |= set([str(x.id) for x in list(rxn.metabolites)])
new_cpd_ids = new_cpd_ids.difference(orig_cpd_ids)
new_cpds = copy.deepcopy([reaction_bag.metabolites.get_by_id(cpd) for cpd in new_cpd_ids])
new_model = copy.deepcopy(model)
new_model.add_metabolites(new_cpds)
new_model.add_reactions(new_rxns)
if (add_exchanges == True):
new_exchanges = extend_exchanges(new_model, new_cpd_ids, extracellular)
if (len(new_exchanges) > 0):
new_rxn_ids |= new_exchanges
duration = int(round((time.time() - start_time)))
print((((((('Took ' + str(duration)) + ' seconds to gapfill ') + str(len(new_rxn_ids))) + ' reactions and ') + str(len(new_cpd_ids))) + ' metabolites.'))
new_obj_val = new_model.slim_optimize()
if (new_obj_val > 1e-06):
print((('Gapfilled model objective now carries flux (' + str(new_obj_val)) + ').'))
else:
print('Gapfilled model objective still does not carry flux.')
return new_model |
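A hedged usage sketch for the gapfiller above. The file names and objective reaction ID are hypothetical placeholders; it assumes cobrapy is installed and the module is importable under the path shown in the metadata (`pfba_gapfiller.py`).

```python
import cobra
from pfba_gapfiller import pfba_gapfill  # module name taken from this file's path

# Placeholder file names and reaction ID -- substitute your own.
draft = cobra.io.read_sbml_model('draft_model.xml')
universal = cobra.io.load_json_model('universal_reactions.json')

gapfilled = pfba_gapfill(
    draft,
    universal,
    obj='biomass_rxn',  # hypothetical objective reaction ID
    obj_lb=10.0,
    iters=100,          # sample 100 flux distributions
    add_exchanges=True,
    cores=4,
)
cobra.io.write_sbml_model(gapfilled, 'gapfilled_model.xml')
```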
def list(self, **kwargs: Any) -> AsyncIterable['_models.ContainerGroupListResult']:
'Get a list of container groups in the specified subscription.\n\n Get a list of container groups in the specified subscription. This operation returns properties\n of each container group including containers, image registry credentials, restart policy, IP\n address type, OS type, state, and volumes.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ContainerGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data) | -2,796,484,973,899,663,000 | Get a list of container groups in the specified subscription.
Get a list of container groups in the specified subscription. This operation returns properties
of each container group including containers, image registry credentials, restart policy, IP
address type, OS type, state, and volumes.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | list | Codejune/azure-sdk-for-python | python | def list(self, **kwargs: Any) -> AsyncIterable['_models.ContainerGroupListResult']:
'Get a list of container groups in the specified subscription.\n\n Get a list of container groups in the specified subscription. This operation returns properties\n of each container group including containers, image registry credentials, restart policy, IP\n address type, OS type, state, and volumes.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ContainerGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data) |
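On the caller side this pager is consumed with `async for`; a sketch assuming the async management client from the same package and `azure-identity` for credentials (the subscription ID is a placeholder):

```python
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerinstance.aio import ContainerInstanceManagementClient

async def main():
    credential = DefaultAzureCredential()
    client = ContainerInstanceManagementClient(credential, '<subscription-id>')
    async with client:
        # AsyncItemPaged follows next_link across pages transparently.
        async for group in client.container_groups.list():
            print(group.name, group.location)
    await credential.close()

asyncio.run(main())
```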
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable['_models.ContainerGroupListResult']:
'Get a list of container groups in the specified subscription and resource group.\n\n Get a list of container groups in a specified subscription and resource group. This operation\n returns properties of each container group including containers, image registry credentials,\n restart policy, IP address type, OS type, state, and volumes.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ContainerGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data) | -705,786,126,377,228,700 | Get a list of container groups in the specified subscription and resource group.
Get a list of container groups in a specified subscription and resource group. This operation
returns properties of each container group including containers, image registry credentials,
restart policy, IP address type, OS type, state, and volumes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | list_by_resource_group | Codejune/azure-sdk-for-python | python | def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable['_models.ContainerGroupListResult']:
'Get a list of container groups in the specified subscription and resource group.\n\n Get a list of container groups in a specified subscription and resource group. This operation\n returns properties of each container group including containers, image registry credentials,\n restart policy, IP address type, OS type, state, and volumes.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ContainerGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data) |
async def get(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> '_models.ContainerGroup':
'Get the properties of the specified container group.\n\n Gets the properties of the specified container group in the specified subscription and resource\n group. The operation returns the properties of each container group including containers, image\n registry credentials, restart policy, IP address type, OS type, state, and volumes.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ContainerGroup, or the result of cls(response)\n :rtype: ~azure.mgmt.containerinstance.models.ContainerGroup\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | 1,874,300,566,631,751,400 | Get the properties of the specified container group.
Gets the properties of the specified container group in the specified subscription and resource
group. The operation returns the properties of each container group including containers, image
registry credentials, restart policy, IP address type, OS type, state, and volumes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerGroup, or the result of cls(response)
:rtype: ~azure.mgmt.containerinstance.models.ContainerGroup
:raises: ~azure.core.exceptions.HttpResponseError | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | get | Codejune/azure-sdk-for-python | python | async def get(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> '_models.ContainerGroup':
'Get the properties of the specified container group.\n\n Gets the properties of the specified container group in the specified subscription and resource\n group. The operation returns the properties of each container group including containers, image\n registry credentials, restart policy, IP address type, OS type, state, and volumes.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ContainerGroup, or the result of cls(response)\n :rtype: ~azure.mgmt.containerinstance.models.ContainerGroup\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
async def begin_create_or_update(self, resource_group_name: str, container_group_name: str, container_group: '_models.ContainerGroup', **kwargs: Any) -> AsyncLROPoller['_models.ContainerGroup']:
'Create or update container groups.\n\n Create or update container groups with specified configurations.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :param container_group: The properties of the container group to be created or updated.\n :type container_group: ~azure.mgmt.containerinstance.models.ContainerGroup\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, container_group=container_group, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) | 8,407,050,112,432,623,000 | Create or update container groups.
Create or update container groups with specified configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:param container_group: The properties of the container group to be created or updated.
:type container_group: ~azure.mgmt.containerinstance.models.ContainerGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]
:raises ~azure.core.exceptions.HttpResponseError: | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | begin_create_or_update | Codejune/azure-sdk-for-python | python | async def begin_create_or_update(self, resource_group_name: str, container_group_name: str, container_group: '_models.ContainerGroup', **kwargs: Any) -> AsyncLROPoller['_models.ContainerGroup']:
'Create or update container groups.\n\n Create or update container groups with specified configurations.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :param container_group: The properties of the container group to be created or updated.\n :type container_group: ~azure.mgmt.containerinstance.models.ContainerGroup\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, container_group=container_group, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) |
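The `begin_create_or_update` record above returns an AsyncLROPoller; a hedged sketch of driving it, where `client` is a ContainerInstanceManagementClient as in the earlier example and the image name, location, and resource sizes are illustrative assumptions.

from azure.mgmt.containerinstance.models import (
    Container,
    ContainerGroup,
    ResourceRequests,
    ResourceRequirements,
)


async def create_group(client) -> ContainerGroup:
    container = Container(
        name="app",
        image="mcr.microsoft.com/azuredocs/aci-helloworld",
        resources=ResourceRequirements(
            requests=ResourceRequests(cpu=1.0, memory_in_gb=1.5)
        ),
    )
    group = ContainerGroup(location="eastus", os_type="Linux", containers=[container])
    poller = await client.container_groups.begin_create_or_update(
        "my-rg", "my-container-group", group
    )
    # result() waits for the LRO to finish and returns the value produced by
    # get_long_running_output, i.e. a deserialized ContainerGroup.
    return await poller.result()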
async def update(self, resource_group_name: str, container_group_name: str, resource: '_models.Resource', **kwargs: Any) -> '_models.ContainerGroup':
'Update container groups.\n\n Updates container group tags with specified values.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :param resource: The container group resource with just the tags to be updated.\n :type resource: ~azure.mgmt.containerinstance.models.Resource\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ContainerGroup, or the result of cls(response)\n :rtype: ~azure.mgmt.containerinstance.models.ContainerGroup\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
content_type = kwargs.pop('content_type', 'application/json')
accept = 'application/json'
url = self.update.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str')
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(resource, 'Resource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | 4,709,774,830,064,881,000 | Update container groups.
Updates container group tags with specified values.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:param resource: The container group resource with just the tags to be updated.
:type resource: ~azure.mgmt.containerinstance.models.Resource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerGroup, or the result of cls(response)
:rtype: ~azure.mgmt.containerinstance.models.ContainerGroup
:raises: ~azure.core.exceptions.HttpResponseError | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | update | Codejune/azure-sdk-for-python | python | async def update(self, resource_group_name: str, container_group_name: str, resource: '_models.Resource', **kwargs: Any) -> '_models.ContainerGroup':
'Update container groups.\n\n Updates container group tags with specified values.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :param resource: The container group resource with just the tags to be updated.\n :type resource: ~azure.mgmt.containerinstance.models.Resource\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ContainerGroup, or the result of cls(response)\n :rtype: ~azure.mgmt.containerinstance.models.ContainerGroup\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
content_type = kwargs.pop('content_type', 'application/json')
accept = 'application/json'
url = self.update.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str')
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(resource, 'Resource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
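Because `update` is a plain PATCH rather than a long-running operation, it can be awaited directly; a sketch that replaces only the tags (the tag values are assumptions).

from azure.mgmt.containerinstance.models import Resource


async def tag_group(client) -> dict:
    updated = await client.container_groups.update(
        "my-rg", "my-container-group", Resource(tags={"env": "dev", "owner": "platform"})
    )
    return updated.tags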
async def begin_delete(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> AsyncLROPoller['_models.ContainerGroup']:
'Delete the specified container group.\n\n Delete the specified container group in the specified subscription and resource group. The\n operation does not delete other resources provided by the user, such as volumes.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._delete_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) | 5,706,097,528,254,374,000 | Delete the specified container group.
Delete the specified container group in the specified subscription and resource group. The
operation does not delete other resources provided by the user, such as volumes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]
:raises ~azure.core.exceptions.HttpResponseError: | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | begin_delete | Codejune/azure-sdk-for-python | python | async def begin_delete(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> AsyncLROPoller['_models.ContainerGroup']:
'Delete the specified container group.\n\n Delete the specified container group in the specified subscription and resource group. The\n operation does not delete other resources provided by the user, such as volumes.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._delete_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ContainerGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) |
async def begin_restart(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
'Restarts all containers in a container group.\n\n Restarts all containers in a container group in place. If container image has updates, new\n image will be downloaded.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._restart_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) | 5,351,700,045,365,024,000 | Restarts all containers in a container group.
Restarts all containers in a container group in place. If container image has updates, new
image will be downloaded.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError: | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | begin_restart | Codejune/azure-sdk-for-python | python | async def begin_restart(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
'Restarts all containers in a container group.\n\n Restarts all containers in a container group in place. If container image has updates, new\n image will be downloaded.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._restart_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) |
async def stop(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> None:
'Stops all containers in a container group.\n\n Stops all containers in a container group. Compute resources will be deallocated and billing\n will stop.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
url = self.stop.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {}) | 1,902,649,873,740,619,000 | Stops all containers in a container group.
Stops all containers in a container group. Compute resources will be deallocated and billing
will stop.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | stop | Codejune/azure-sdk-for-python | python | async def stop(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> None:
'Stops all containers in a container group.\n\n Stops all containers in a container group. Compute resources will be deallocated and billing\n will stop.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2021-03-01'
accept = 'application/json'
url = self.stop.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {}) |
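Unlike the delete/restart/start siblings, `stop` above completes in a single request (expected status 204) and returns None, so there is no poller to drive.

async def stop_group(client) -> None:
    # Deallocates compute and stops billing; nothing is returned on success.
    await client.container_groups.stop("my-rg", "my-container-group")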
async def begin_start(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
'Starts all containers in a container group.\n\n Starts all containers in a container group. Compute resources will be allocated and billing\n will start.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._start_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) | -8,254,602,122,778,802,000 | Starts all containers in a container group.
Starts all containers in a container group. Compute resources will be allocated and billing
will start.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError: | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | begin_start | Codejune/azure-sdk-for-python | python | async def begin_start(self, resource_group_name: str, container_group_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
'Starts all containers in a container group.\n\n Starts all containers in a container group. Compute resources will be allocated and billing\n will start.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param container_group_name: The name of the container group.\n :type container_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._start_initial(resource_group_name=resource_group_name, container_group_name=container_group_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'containerGroupName': self._serialize.url('container_group_name', container_group_name, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) |
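`begin_start` and `begin_restart` are LROs whose get_long_running_output deserializes no body, so awaiting the poller's result() simply blocks until the service reports the operation complete. A sketch of a stop/start bounce, with the same assumed names as above:

async def bounce_group(client) -> None:
    await client.container_groups.stop("my-rg", "my-container-group")
    poller = await client.container_groups.begin_start("my-rg", "my-container-group")
    await poller.result()  # returns None once the start operation finishes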
def sector_length(self, theta: float) -> float:
'Returns the length of a sector of the circle that subtends an angle theta (radians) at the center.'
return (self.radius * theta) | 2,799,833,291,798,162,000 | Returns the length of a sector of the circle that subtends an angle theta (radians) at the center. | Geometry/circle.py | sector_length | Lakshmikanth2001/ElectricPy | python | def sector_length(self, theta: float) -> float:
return (self.radius * theta) |
def sector_area(self, theta: float) -> float:
'Returns the area of a sector of the circle that subtends an angle theta (radians) at the center.'
return (((self.radius ** 2) * theta) / 2) | -8,994,213,026,258,299,000 | Returns the area of a sector of the circle that subtends an angle theta (radians) at the center. | Geometry/circle.py | sector_area | Lakshmikanth2001/ElectricPy | python | def sector_area(self, theta: float) -> float:
return (((self.radius ** 2) * theta) / 2) |
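Both sector helpers assume a Circle class (from ElectricPy's Geometry module) exposing a radius attribute, which this excerpt does not show. A minimal stand-in that sanity-checks the formulas: a full sweep of theta = 2*pi must recover the circumference and the full area.

import math


class Circle:
    # Minimal stand-in for the real class; only the radius attribute is assumed.
    def __init__(self, radius: float):
        self.radius = radius

    def sector_length(self, theta: float) -> float:
        return self.radius * theta

    def sector_area(self, theta: float) -> float:
        return (self.radius ** 2) * theta / 2


c = Circle(3.0)
assert math.isclose(c.sector_length(2 * math.pi), 2 * math.pi * 3.0)  # circumference
assert math.isclose(c.sector_area(2 * math.pi), math.pi * 3.0 ** 2)   # full circle area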
def confirm(self):
'Selection completed, set the value and close'
o_dict = self.enters
for (index, item) in enumerate(self._labels_):
o_dict[item.text()] = self.fields[index].text()
self.close() | 1,297,856,830,580,686,000 | Selection completed, set the value and close | easygraphics/dialog/multifields.py | confirm | royqh1979/PyEasyGraphics | python | def confirm(self):
o_dict = self.enters
for (index, item) in enumerate(self._labels_):
o_dict[item.text()] = self.fields[index].text()
self.close() |
def enabled():
'\n Allow selection of distutils by environment variable.\n '
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return (which == 'local') | 1,360,317,736,098,824,200 | Allow selection of distutils by environment variable. | DatabaseControlWrapper_JE/venv/Lib/site-packages/_distutils_hack/__init__.py | enabled | JE-Chen/je_old_repo | python | def enabled():
'\n \n '
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return (which == 'local') |
def do_override():
'\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n '
if enabled():
warn_distutils_present()
ensure_local_distutils() | 2,698,934,913,728,264,000 | Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation. | DatabaseControlWrapper_JE/venv/Lib/site-packages/_distutils_hack/__init__.py | do_override | JE-Chen/je_old_repo | python | def do_override():
'\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n '
if enabled():
warn_distutils_present()
ensure_local_distutils() |
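`enabled()` keys off the SETUPTOOLS_USE_DISTUTILS environment variable (defaulting to "stdlib"), and `do_override()` only acts when it reads "local". In practice setuptools triggers this automatically via a .pth file at interpreter start-up, so the explicit sequence below is an illustrative sketch only:

import os

os.environ["SETUPTOOLS_USE_DISTUTILS"] = "local"  # must be set before distutils is imported

import _distutils_hack

_distutils_hack.do_override()  # warns if stdlib distutils is already loaded, then re-points it

import distutils  # now resolves to the setuptools-bundled copy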
def spec_for_pip(self):
'\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n '
if self.pip_imported_during_build():
return
clear_distutils()
self.spec_for_distutils = (lambda : None) | 2,112,447,620,777,783,600 | Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale. | DatabaseControlWrapper_JE/venv/Lib/site-packages/_distutils_hack/__init__.py | spec_for_pip | JE-Chen/je_old_repo | python | def spec_for_pip(self):
'\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n '
if self.pip_imported_during_build():
return
clear_distutils()
self.spec_for_distutils = (lambda : None) |
@staticmethod
def pip_imported_during_build():
'\n Detect if pip is being imported in a build script. Ref #2355.\n '
import traceback
return any((frame.f_globals['__file__'].endswith('setup.py') for (frame, line) in traceback.walk_stack(None))) | -5,233,345,824,173,237,000 | Detect if pip is being imported in a build script. Ref #2355. | DatabaseControlWrapper_JE/venv/Lib/site-packages/_distutils_hack/__init__.py | pip_imported_during_build | JE-Chen/je_old_repo | python | @staticmethod
def pip_imported_during_build():
'\n \n '
import traceback
return any((frame.f_globals['__file__'].endswith('setup.py') for (frame, line) in traceback.walk_stack(None))) |
def get(self, filepath):
'Get values according to the filepath.\n\n Args:\n filepath (str | :obj:`Path`): Here, filepath is the lmdb key.\n '
filepath = str(filepath)
with self._client.begin(write=False) as txn:
value_buf = txn.get(filepath.encode('ascii'))
return value_buf | -8,301,798,783,937,292,000 | Get values according to the filepath.
Args:
filepath (str | :obj:`Path`): Here, filepath is the lmdb key. | mmcv_custom/fileio/file_client.py | get | MendelXu/mmdetection-1 | python | def get(self, filepath):
'Get values according to the filepath.\n\n Args:\n filepath (str | :obj:`Path`): Here, filepath is the lmdb key.\n '
filepath = str(filepath)
with self._client.begin(write=False) as txn:
value_buf = txn.get(filepath.encode('ascii'))
return value_buf |
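The constructor of this LMDB-backed client is not part of the record, so the class name and db_path argument below are assumptions; `get` looks the ascii-encoded file path up as an LMDB key and returns the raw value bytes (or None when the key is missing), which callers typically decode themselves.

import cv2
import numpy as np

client = LmdbBackend(db_path="data/train.lmdb")  # class name and constructor assumed
buf = client.get("images/000001.jpg")
if buf is not None:
    # Decode the stored bytes back into an image array.
    img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), cv2.IMREAD_COLOR)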
def db_connect():
'\n Performs database connection using database settings from settings.py.\n Returns sqlalchemy engine instance\n '
return create_engine(get_project_settings().get('CONNECTION_STRING')) | -7,243,045,413,308,925,000 | Performs database connection using database settings from settings.py.
Returns sqlalchemy engine instance | src/parliamentbg/parliamentbg/models.py | db_connect | Georgitanev/python38_proj_adata | python | def db_connect():
'\n Performs database connection using database settings from settings.py.\n Returns sqlalchemy engine instance\n '
return create_engine(get_project_settings().get('CONNECTION_STRING')) |
def create_table(engine):
' create tables'
Base.metadata.create_all(engine) | 6,008,615,466,500,185,000 | create tables | src/parliamentbg/parliamentbg/models.py | create_table | Georgitanev/python38_proj_adata | python | def create_table(engine):
' '
Base.metadata.create_all(engine) |
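`db_connect()` reads CONNECTION_STRING from the Scrapy project settings and `create_table()` issues CREATE TABLE for every model registered on the module's declarative Base (not shown in this excerpt). A sketch as it might appear inside the same models module, with an assumed mapped model:

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker


class Member(Base):  # Base is the declarative base this module defines
    __tablename__ = "member"
    id = Column(Integer, primary_key=True)
    name = Column(String)


engine = db_connect()
create_table(engine)
Session = sessionmaker(bind=engine)
with Session() as session:  # context-manager form requires SQLAlchemy 1.4+
    session.add(Member(name="example"))
    session.commit()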
def Dispose(self):
' Dispose(self: RevitLinkOperations) '
pass | -6,751,632,744,207,618,000 | Dispose(self: RevitLinkOperations) | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | Dispose | BCSharp/ironpython-stubs | python | def Dispose(self):
' '
pass |
def ReleaseUnmanagedResources(self, *args):
' ReleaseUnmanagedResources(self: RevitLinkOperations,disposing: bool) '
pass | 7,246,293,472,291,897,000 | ReleaseUnmanagedResources(self: RevitLinkOperations,disposing: bool) | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | ReleaseUnmanagedResources | BCSharp/ironpython-stubs | python | def ReleaseUnmanagedResources(self, *args):
' '
pass |
def SetGetLocalPathForOpenCallback(self, makeLocalCopyForOpen):
'\n SetGetLocalPathForOpenCallback(self: RevitLinkOperations,makeLocalCopyForOpen: IGetLocalPathForOpenCallback)\n\n Sets the IGetLocalPathForOpenCallback that will support the "Open (and Unload)" \n\n command for Revit links\n\n obtained from an IExternalResourceServer.\n\n \n\n \n\n makeLocalCopyForOpen: The IGetLocalPathForOpenCallback that will support the "Open (and Unload)" \n\n command.\n '
pass | -8,019,159,439,824,894,000 | SetGetLocalPathForOpenCallback(self: RevitLinkOperations,makeLocalCopyForOpen: IGetLocalPathForOpenCallback)
Sets the IGetLocalPathForOpenCallback that will support the "Open (and Unload)"
command for Revit links
obtained from an IExternalResourceServer.
makeLocalCopyForOpen: The IGetLocalPathForOpenCallback that will support the "Open (and Unload)"
command. | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | SetGetLocalPathForOpenCallback | BCSharp/ironpython-stubs | python | def SetGetLocalPathForOpenCallback(self, makeLocalCopyForOpen):
'\n SetGetLocalPathForOpenCallback(self: RevitLinkOperations,makeLocalCopyForOpen: IGetLocalPathForOpenCallback)\n\n Sets the IGetLocalPathForOpenCallback that will support the "Open (and Unload)" \n\n command for Revit links\n\n obtained from an IExternalResourceServer.\n\n \n\n \n\n makeLocalCopyForOpen: The IGetLocalPathForOpenCallback that will support the "Open (and Unload)" \n\n command.\n '
pass |
def SetOnLocalLinkSharedCoordinatesSavedCallback(self, onLocalLinkSharedCoordinatesSaved):
'\n SetOnLocalLinkSharedCoordinatesSavedCallback(self: RevitLinkOperations,onLocalLinkSharedCoordinatesSaved: IOnLocalLinkSharedCoordinatesSavedCallback)\n\n Sets the callback that will be called when the Revit user saves new shared \n\n coordinate\n\n settings to a linked document obtained from an \n\n IExternalResourceServer.\n\n \n\n \n\n onLocalLinkSharedCoordinatesSaved: An IOnLocalLinkSharedCoordinatesSavedCallback object that can respond when the \n\n user\n\n saves new shared coordinates to a Revit link document obtained from \n\n IExternalResourceServer.\n '
pass | 1,981,371,478,176,343,300 | SetOnLocalLinkSharedCoordinatesSavedCallback(self: RevitLinkOperations,onLocalLinkSharedCoordinatesSaved: IOnLocalLinkSharedCoordinatesSavedCallback)
Sets the callback that will be called when the Revit user saves new shared
coordinate
settings to a linked document obtained from an
IExternalResourceServer.
onLocalLinkSharedCoordinatesSaved: An IOnLocalLinkSharedCoordinatesSavedCallback object that can respond when the
user
saves new shared coordinates to a Revit link document obtained from
IExternalResourceServer. | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | SetOnLocalLinkSharedCoordinatesSavedCallback | BCSharp/ironpython-stubs | python | def SetOnLocalLinkSharedCoordinatesSavedCallback(self, onLocalLinkSharedCoordinatesSaved):
'\n SetOnLocalLinkSharedCoordinatesSavedCallback(self: RevitLinkOperations,onLocalLinkSharedCoordinatesSaved: IOnLocalLinkSharedCoordinatesSavedCallback)\n\n Sets the callback that will be called when the Revit user saves new shared \n\n coordinate\n\n settings to a linked document obtained from an \n\n IExternalResourceServer.\n\n \n\n \n\n onLocalLinkSharedCoordinatesSaved: An IOnLocalLinkSharedCoordinatesSavedCallback object that can respond when the \n\n user\n\n saves new shared coordinates to a Revit link document obtained from \n\n IExternalResourceServer.\n '
pass |
def __enter__(self, *args):
' __enter__(self: IDisposable) -> object '
pass | -4,485,805,406,909,797,400 | __enter__(self: IDisposable) -> object | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | __enter__ | BCSharp/ironpython-stubs | python | def __enter__(self, *args):
' '
pass |
def __exit__(self, *args):
' __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) '
pass | -8,148,954,987,636,554,000 | __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | __exit__ | BCSharp/ironpython-stubs | python | def __exit__(self, *args):
' '
pass |
def __init__(self, *args):
' x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature '
pass | -90,002,593,062,007,400 | x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | __init__ | BCSharp/ironpython-stubs | python | def __init__(self, *args):
' '
pass |
def __repr__(self, *args):
' __repr__(self: object) -> str '
pass | 6,997,354,972,551,989,000 | __repr__(self: object) -> str | release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | __repr__ | BCSharp/ironpython-stubs | python | def __repr__(self, *args):
' '
pass |
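These stubs mirror .NET's IDisposable: __enter__ and __exit__ exist so the wrapper can be used in a with block that guarantees Dispose runs. Instances come from the Revit API rather than direct construction, so `link_ops` below is an assumed handle and the callback a stand-in:

# link_ops: a RevitLinkOperations obtained from the Revit API (assumed here).
with link_ops:
    # my_open_callback: an assumed object implementing IGetLocalPathForOpenCallback.
    link_ops.SetGetLocalPathForOpenCallback(my_open_callback)
# Leaving the block calls __exit__, which disposes the unmanaged wrapper.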
def file_extension(typeuri):
'\n Returns preferred file extension for resource type\n\n >>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"\n True\n >>> file_extension(ANNAL.CURIE.Richtext) == "md"\n True\n\n '
return resource_types.get(typeuri, default_types)[0][0] | 6,075,813,067,008,685,000 | Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True | src/annalist_root/annalist/resourcetypes.py | file_extension | gklyne/annalist | python | def file_extension(typeuri):
'\n Returns preferred file extension for resource type\n\n >>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"\n True\n >>> file_extension(ANNAL.CURIE.Richtext) == "md"\n True\n\n '
return resource_types.get(typeuri, default_types)[0][0] |
def content_type(typeuri):
'\n Returns preferred MIME content-type for resource type\n\n >>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"\n True\n >>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"\n True\n\n '
return resource_types.get(typeuri, default_types)[0][1] | -123,793,105,508,690,300 | Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True | src/annalist_root/annalist/resourcetypes.py | content_type | gklyne/annalist | python | def content_type(typeuri):
'\n Returns preferred MIME content-type for resource type\n\n >>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"\n True\n >>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"\n True\n\n '
return resource_types.get(typeuri, default_types)[0][1] |
def file_extension_for_content_type(typeuri, content_type):
'\n Returns file extension for given content-type as an instance of a given type URI,\n or None.\n\n >>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"\n True\n >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"\n True\n >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"\n True\n >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None\n True\n\n '
for (fe, ct) in resource_types.get(typeuri, default_types):
if (ct == content_type):
return fe
return None | -6,839,399,522,995,400,000 | Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True | src/annalist_root/annalist/resourcetypes.py | file_extension_for_content_type | gklyne/annalist | python | def file_extension_for_content_type(typeuri, content_type):
'\n Returns file extension for given content-type as an instance of a given type URI,\n or None.\n\n >>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"\n True\n >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"\n True\n >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"\n True\n >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None\n True\n\n '
for (fe, ct) in resource_types.get(typeuri, default_types):
if (ct == content_type):
return fe
return None |
def content_type_for_file_extension(typeuri, file_extension):
'\n Returns content-type for given file extension as an instance of a given type URI,\n or None.\n\n >>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"\n True\n >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"\n True\n >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"\n True\n >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None\n True\n\n '
for (fe, ct) in resource_types.get(typeuri, default_types):
if (fe == file_extension):
return ct
return None | 6,864,340,295,690,867,000 | Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True | src/annalist_root/annalist/resourcetypes.py | content_type_for_file_extension | gklyne/annalist | python | def content_type_for_file_extension(typeuri, file_extension):
'\n Returns content-type for given file extension as an instance of a given type URI,\n or None.\n\n >>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"\n True\n >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"\n True\n >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"\n True\n >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None\n True\n\n '
for (fe, ct) in resource_types.get(typeuri, default_types):
if (fe == file_extension):
return ct
return None |
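All four helpers above consult a `resource_types` table (type URI mapped to a list of (file-extension, content-type) pairs, preferred pair first) plus a `default_types` fallback, neither of which appears in this excerpt. A reduced reconstruction that satisfies the doctests shown; the real table in annalist/resourcetypes.py is larger:

default_types = [("md", "text/markdown"), ("pdf", "application/pdf")]

resource_types = {
    ANNAL.CURIE.Metadata: [("jsonld", "application/ld+json")],
    ANNAL.CURIE.Richtext: [("md", "text/markdown")],
    ANNAL.CURIE.Resource: default_types,  # generic resources accept several formats
}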
def find_packages(self, dependency):
'\n Find packages on the remote server.\n '
constraint = dependency.constraint
if (constraint is None):
constraint = '*'
if (not isinstance(constraint, VersionConstraint)):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (((constraint.max is not None) and constraint.max.is_prerelease()) or ((constraint.min is not None) and constraint.min.is_prerelease())):
allow_prereleases = True
try:
info = self.get_package_info(dependency.name)
except PackageNotFound:
self._log('No packages found for {} {}'.format(dependency.name, str(constraint)), level='debug')
return []
packages = []
ignored_pre_release_packages = []
for (version, release) in info['releases'].items():
if (not release):
self._log('No release information found for {}-{}, skipping'.format(dependency.name, version), level='debug')
continue
try:
package = Package(info['info']['name'], version)
except ParseVersionError:
self._log('Unable to parse version "{}" for the {} package, skipping'.format(version, dependency.name), level='debug')
continue
if (package.is_prerelease() and (not allow_prereleases)):
if constraint.is_any():
ignored_pre_release_packages.append(package)
continue
if ((not constraint) or (constraint and constraint.allows(package.version))):
packages.append(package)
self._log('{} packages found for {} {}'.format(len(packages), dependency.name, str(constraint)), level='debug')
return (packages or ignored_pre_release_packages) | -7,957,149,436,540,154,000 | Find packages on the remote server. | venv/Lib/site-packages/poetry/repositories/pypi_repository.py | find_packages | KevinArellano94/Python-Supabase | python | def find_packages(self, dependency):
'\n \n '
constraint = dependency.constraint
if (constraint is None):
constraint = '*'
if (not isinstance(constraint, VersionConstraint)):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (((constraint.max is not None) and constraint.max.is_prerelease()) or ((constraint.min is not None) and constraint.min.is_prerelease())):
allow_prereleases = True
try:
info = self.get_package_info(dependency.name)
except PackageNotFound:
self._log('No packages found for {} {}'.format(dependency.name, str(constraint)), level='debug')
return []
packages = []
ignored_pre_release_packages = []
for (version, release) in info['releases'].items():
if (not release):
self._log('No release information found for {}-{}, skipping'.format(dependency.name, version), level='debug')
continue
try:
package = Package(info['info']['name'], version)
except ParseVersionError:
self._log('Unable to parse version "{}" for the {} package, skipping'.format(version, dependency.name), level='debug')
continue
if (package.is_prerelease() and (not allow_prereleases)):
if constraint.is_any():
ignored_pre_release_packages.append(package)
continue
if ((not constraint) or (constraint and constraint.allows(package.version))):
packages.append(package)
self._log('{} packages found for {} {}'.format(len(packages), dependency.name, str(constraint)), level='debug')
return (packages or ignored_pre_release_packages) |
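A minimal call-site sketch for find_packages above. The import paths are assumptions and move between poetry releases; Dependency accepts a name and a constraint string.

# Hypothetical usage; import locations vary across poetry versions.
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.core.packages.dependency import Dependency

repo = PyPiRepository()
dep = Dependency('requests', '^2.25')      # constraint string is parsed internally
for pkg in repo.find_packages(dep):        # pre-releases filtered per the logic above
    print(pkg.name, pkg.version)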
def get_package_info(self, name):
'\n Return the package information given its name.\n\n The information is returned from the cache if it exists\n or retrieved from the remote server.\n '
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store('packages').remember_forever(name, (lambda : self._get_package_info(name))) | 2,733,772,927,765,073,000 | Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server. | venv/Lib/site-packages/poetry/repositories/pypi_repository.py | get_package_info | KevinArellano94/Python-Supabase | python | def get_package_info(self, name):
'\n Return the package information given its name.\n\n The information is returned from the cache if it exists\n or retrieved from the remote server.\n '
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store('packages').remember_forever(name, (lambda : self._get_package_info(name))) |
def get_release_info(self, name, version):
'\n Return the release information given a package name and a version.\n\n The information is returned from the cache if it exists\n or retrieved from the remote server.\n '
if self._disable_cache:
return PackageInfo.load(self._get_release_info(name, version))
cached = self._cache.remember_forever('{}:{}'.format(name, version), (lambda : self._get_release_info(name, version)))
cache_version = cached.get('_cache_version', '0.0.0')
if (parse_constraint(cache_version) != self.CACHE_VERSION):
self._log('The cache for {} {} is outdated. Refreshing.'.format(name, version), level='debug')
cached = self._get_release_info(name, version)
self._cache.forever('{}:{}'.format(name, version), cached)
return PackageInfo.load(cached) | -619,394,622,787,674,400 | Return the release information given a package name and a version.
The information is returned from the cache if it exists
or retrieved from the remote server. | venv/Lib/site-packages/poetry/repositories/pypi_repository.py | get_release_info | KevinArellano94/Python-Supabase | python | def get_release_info(self, name, version):
'\n Return the release information given a package name and a version.\n\n The information is returned from the cache if it exists\n or retrieved from the remote server.\n '
if self._disable_cache:
return PackageInfo.load(self._get_release_info(name, version))
cached = self._cache.remember_forever('{}:{}'.format(name, version), (lambda : self._get_release_info(name, version)))
cache_version = cached.get('_cache_version', '0.0.0')
if (parse_constraint(cache_version) != self.CACHE_VERSION):
self._log('The cache for {} {} is outdated. Refreshing.'.format(name, version), level='debug')
cached = self._get_release_info(name, version)
self._cache.forever('{}:{}'.format(name, version), cached)
return PackageInfo.load(cached) |
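The two cached methods above share one pattern: entries are stored forever, then invalidated by comparing a stored `_cache_version` field against the repository's CACHE_VERSION. A generic, dict-backed sketch of that pattern (names are illustrative):

# Dict-backed sketch of the cache-versioning scheme used by get_release_info().
def remember_with_version(cache, key, current_version, compute):
    cached = cache.get(key)
    if cached is None or cached.get('_cache_version') != current_version:
        cached = compute()                        # rebuild on miss or stale version
        cached['_cache_version'] = current_version
        cache[key] = cached                       # persist the refreshed entry
    return cached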
def __init__(self, whitelist=None):
'ShowWhitelistResponse - a model defined in huaweicloud sdk'
super().__init__()
self._whitelist = None
self.discriminator = None
if (whitelist is not None):
self.whitelist = whitelist | -6,618,678,179,490,809,000 | ShowWhitelistResponse - a model defined in huaweicloud sdk | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | __init__ | Adek06/huaweicloud-sdk-python-v3 | python | def __init__(self, whitelist=None):
super().__init__()
self._whitelist = None
self.discriminator = None
if (whitelist is not None):
self.whitelist = whitelist |
@property
def whitelist(self):
'Gets the whitelist of this ShowWhitelistResponse.\n\n\n :return: The whitelist of this ShowWhitelistResponse.\n :rtype: WhitelistResp\n '
return self._whitelist | -9,018,610,111,317,692,000 | Gets the whitelist of this ShowWhitelistResponse.
:return: The whitelist of this ShowWhitelistResponse.
:rtype: WhitelistResp | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | whitelist | Adek06/huaweicloud-sdk-python-v3 | python | @property
def whitelist(self):
'Gets the whitelist of this ShowWhitelistResponse.\n\n\n :return: The whitelist of this ShowWhitelistResponse.\n :rtype: WhitelistResp\n '
return self._whitelist |
@whitelist.setter
def whitelist(self, whitelist):
'Sets the whitelist of this ShowWhitelistResponse.\n\n\n :param whitelist: The whitelist of this ShowWhitelistResponse.\n :type: WhitelistResp\n '
self._whitelist = whitelist | -8,439,878,928,571,194,000 | Sets the whitelist of this ShowWhitelistResponse.
:param whitelist: The whitelist of this ShowWhitelistResponse.
:type: WhitelistResp | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | whitelist | Adek06/huaweicloud-sdk-python-v3 | python | @whitelist.setter
def whitelist(self, whitelist):
'Sets the whitelist of this ShowWhitelistResponse.\n\n\n :param whitelist: The whitelist of this ShowWhitelistResponse.\n :type: WhitelistResp\n '
self._whitelist = whitelist |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | 2,594,216,033,120,720,000 | Returns the model properties as a dict | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | to_dict | Adek06/huaweicloud-sdk-python-v3 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result |
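to_dict above is the boilerplate serializer generated for SDK models: it walks openapi_types and recursively converts nested models, lists, and dicts, masking sensitive fields. A condensed standalone sketch of the same recursion:

# Condensed sketch of the recursive conversion performed by to_dict().
def serialize(value):
    if isinstance(value, list):
        return [serialize(v) for v in value]
    if hasattr(value, 'to_dict'):
        return value.to_dict()                    # nested model
    if isinstance(value, dict):
        return {k: serialize(v) for k, v in value.items()}
    return value                                  # plain scalar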
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | to_str | Adek06/huaweicloud-sdk-python-v3 | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | __repr__ | Adek06/huaweicloud-sdk-python-v3 | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ShowWhitelistResponse)):
return False
return (self.__dict__ == other.__dict__) | 5,736,248,680,708,653,000 | Returns true if both objects are equal | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | __eq__ | Adek06/huaweicloud-sdk-python-v3 | python | def __eq__(self, other):
if (not isinstance(other, ShowWhitelistResponse)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | __ne__ | Adek06/huaweicloud-sdk-python-v3 | python | def __ne__(self, other):
return (not (self == other)) |
def rot_matrix_xaxis(theta=0):
'\n Rotation matrix of a transformation around X axis.\n\n Parameters\n ----------\n theta : `float`\n Rotation angle in radians\n Returns\n -------\n A : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n '
A = np.array([[1, 0, 0], [0, np.cos(theta), ((- 1) * np.sin(theta))], [0, np.sin(theta), np.cos(theta)]])
return A | 1,438,942,499,636,511,700 | Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3) | tests/conftest.py | rot_matrix_xaxis | vcristiani/galaxy-chop | python | def rot_matrix_xaxis(theta=0):
'\n Rotation matrix of a transformation around X axis.\n\n Parameters\n ----------\n theta : `float`\n Rotation angle in radians\n Returns\n -------\n A : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n '
A = np.array([[1, 0, 0], [0, np.cos(theta), ((- 1) * np.sin(theta))], [0, np.sin(theta), np.cos(theta)]])
return A |
def rot_matrix_yaxis(theta=0):
'\n Rotation matrix of a transformation around Y axis.\n\n Parameters\n ----------\n theta : `float`\n Rotation angle in radians\n Returns\n -------\n A : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n '
A = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [((- 1) * np.sin(theta)), 0, np.cos(theta)]])
return A | 1,282,878,592,575,139,300 | Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3) | tests/conftest.py | rot_matrix_yaxis | vcristiani/galaxy-chop | python | def rot_matrix_yaxis(theta=0):
'\n Rotation matrix of a transformation around Y axis.\n\n Parameters\n ----------\n theta : `float`\n Rotation angle in radians\n Returns\n -------\n A : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n '
A = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [((- 1) * np.sin(theta)), 0, np.cos(theta)]])
return A |
def rot_matrix_zaxis(theta=0):
'\n Rotation matrix of a transformation around Z axis.\n\n Parameters\n ----------\n theta : `float`\n Rotation angle in radians\n Returns\n -------\n A : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n '
A = np.array([[np.cos(theta), ((- 1) * np.sin(theta)), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
return A | 3,500,014,483,586,020,000 | Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3) | tests/conftest.py | rot_matrix_zaxis | vcristiani/galaxy-chop | python | def rot_matrix_zaxis(theta=0):
'\n Rotation matrix of a transformation around Z axis.\n\n Parameters\n ----------\n theta : `float`\n Rotation angle in radians\n Returns\n -------\n A : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n '
A = np.array([[np.cos(theta), ((- 1) * np.sin(theta)), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
return A |
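The three helpers above build the standard rotation matrices about the coordinate axes; any proper rotation satisfies A @ A.T = I and det(A) = 1, which gives a quick sanity check (a sketch assuming the helpers are in scope):

import numpy as np

theta = 0.3 * np.pi
for A in (rot_matrix_xaxis(theta), rot_matrix_yaxis(theta), rot_matrix_zaxis(theta)):
    assert np.allclose(A @ A.T, np.eye(3))        # orthogonal
    assert np.isclose(np.linalg.det(A), 1.0)      # proper rotation, no reflection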
def rotate(pos, vel, matrix):
'\n Rotate.\n\n Applies the rotation `matrix` to a set of particles positions `pos` and\n velocities `vel`\n\n Parameters\n ----------\n pos : `np.ndarray`, shape = (N_part, 3)\n Positions of particles\n vel : `np.ndarray`, shape = (N_part, 3)\n Velocities of particles\n matrix : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n\n Returns\n -------\n pos_rot : `np.ndarray`, shape = (N_part, 3)\n Rotated, positions of particles\n vel_rot : `np.ndarray`, shape = (N_part, 3)\n Rotated, velocities of particles\n '
pos_rot = (pos @ matrix)
vel_rot = (vel @ matrix)
return (pos_rot, vel_rot) | -476,487,759,244,119,900 | Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
Rotated, positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
Rotated, velocities of particles | tests/conftest.py | rotate | vcristiani/galaxy-chop | python | def rotate(pos, vel, matrix):
'\n Rotate.\n\n Applies the rotation `matrix` to a set of particles positions `pos` and\n velocities `vel`\n\n Parameters\n ----------\n pos : `np.ndarray`, shape = (N_part, 3)\n Positions of particles\n vel : `np.ndarray`, shape = (N_part, 3)\n Velocities of particles\n matrix : `np.ndarray`\n Rotation matrix, with shape (3, 3)\n\n Returns\n -------\n pos_rot : `np.ndarray`, shape = (N_part, 3)\n Rotated, positions of particles\n vel_rot : `np.ndarray`, shape = (N_part, 3)\n Rotated, velocities of particles\n '
pos_rot = (pos @ matrix)
vel_rot = (vel @ matrix)
return (pos_rot, vel_rot) |
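Because the matrices are orthogonal, rotate() is undone by applying the transpose, which makes a cheap round-trip test (a sketch assuming the helpers above are in scope):

import numpy as np

rng = np.random.default_rng(0)
pos, vel = rng.random((5, 3)), rng.random((5, 3))
A = rot_matrix_zaxis(0.25 * np.pi)
pos_r, vel_r = rotate(pos, vel, A)
assert np.allclose(pos_r @ A.T, pos) and np.allclose(vel_r @ A.T, vel)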
def distance(x, y, z, m):
'\n Distances calculator.\n\n Calculate distances between particles.\n\n Parameters\n ----------\n x, y, z: `np.ndarray`, shape = (N_part, 1)\n Positions\n m : `np.ndarray`, shape = (N_part, 1)\n Masses\n\n Returns\n -------\n dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)\n Distances between particles.\n '
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, (N_part - 1)):
for j in range((i + 1), N_part):
dx[(i, j)] = (x[j] - x[i])
dy[(i, j)] = (y[j] - y[i])
dz[(i, j)] = (z[j] - z[i])
dx[(j, i)] = (- dx[(i, j)])
dy[(j, i)] = (- dy[(i, j)])
dz[(j, i)] = (- dz[(i, j)])
return (dx, dy, dz) | -7,921,276,597,573,377,000 | Distances calculator.
Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles. | tests/conftest.py | distance | vcristiani/galaxy-chop | python | def distance(x, y, z, m):
'\n Distances calculator.\n\n Calculate distances beetween particles.\n\n Parameters\n ----------\n x, y, z: `np.ndarray`, shape = (N_part, 1)\n Positions\n m : `np.ndarray`, shape = (N_part, 1)\n Masses\n\n Returns\n -------\n dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)\n Distances between particles.\n '
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, (N_part - 1)):
for j in range((i + 1), N_part):
dx[(i, j)] = (x[j] - x[i])
dy[(i, j)] = (y[j] - y[i])
dz[(i, j)] = (z[j] - z[i])
dx[(j, i)] = (- dx[(i, j)])
dy[(j, i)] = (- dy[(i, j)])
dz[(j, i)] = (- dz[(i, j)])
return (dx, dy, dz) |
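distance() above runs a pure-Python double loop, which is O(N^2) interpreter work; numpy broadcasting produces the same antisymmetric difference matrices vectorized (m is only used for its length, so the sketch drops it):

import numpy as np

# Vectorized equivalent of distance(): dx[i, j] = x[j] - x[i], etc.
def distance_vec(x, y, z):
    x, y, z = map(np.asarray, (x, y, z))
    dx = x[np.newaxis, :] - x[:, np.newaxis]
    dy = y[np.newaxis, :] - y[:, np.newaxis]
    dz = z[np.newaxis, :] - z[:, np.newaxis]
    return dx, dy, dz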
def epot(x, y, z, m, eps=0.0):
'\n Potential energy with python.\n\n Parameters\n ----------\n x, y, z: `np.ndarray`, shape = (N_part, 1)\n Positions\n m : `np.ndarray`, shape = (N_part, 1)\n Masses\n eps: `float`\n Softening radius\n\n Returns\n -------\n Upot: `np.ndarray`, shape = (N_part, 1)\n Potential energy of particles\n '
G = 4.299e-06
N_part = len(m)
U = np.zeros((N_part, N_part))
(dx, dy, dz) = distance(x, y, z, m)
dist = np.sqrt(((((dx ** 2) + (dy ** 2)) + (dz ** 2)) + (eps ** 2)))
for i in range((N_part - 1)):
for j in range((i + 1), N_part):
U[(i, j)] = (((G * m[j]) * m[i]) / dist[(i, j)])
U[(j, i)] = U[(i, j)]
Upot = np.sum((U / m), axis=0)
return Upot | 5,507,244,668,530,906,000 | Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles | tests/conftest.py | epot | vcristiani/galaxy-chop | python | def epot(x, y, z, m, eps=0.0):
'\n Potential energy with python.\n\n Parameters\n ----------\n x, y, z: `np.ndarray`, shape = (N_part, 1)\n Positions\n m : `np.ndarray`, shape = (N_part, 1)\n Masses\n eps: `float`\n Softening radius\n\n Returns\n -------\n Upot: `np.ndarray`, shape = (N_part, 1)\n Potential energy of particles\n '
G = 4.299e-06
N_part = len(m)
U = np.zeros((N_part, N_part))
(dx, dy, dz) = distance(x, y, z, m)
dist = np.sqrt(((((dx ** 2) + (dy ** 2)) + (dz ** 2)) + (eps ** 2)))
for i in range((N_part - 1)):
for j in range((i + 1), N_part):
U[(i, j)] = (((G * m[j]) * m[i]) / dist[(i, j)])
U[(j, i)] = U[(i, j)]
Upot = np.sum((U / m), axis=0)
return Upot |
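epot() can be vectorized the same way; the sketch below reuses distance_vec from above and masks the diagonal instead of looping. G = 4.299e-06 is kept from the original and appears to be the gravitational constant in kpc (km/s)^2 / Msun, which is an assumption about the intended units.

import numpy as np

def epot_vec(x, y, z, m, eps=0.0):
    G = 4.299e-06
    m = np.asarray(m)
    dx, dy, dz = distance_vec(x, y, z)            # sketch above
    dist = np.sqrt(dx**2 + dy**2 + dz**2 + eps**2)
    np.fill_diagonal(dist, np.inf)                # exclude self-interaction
    U = G * np.outer(m, m) / dist                 # pairwise energies, zero diagonal
    return np.sum(U / m, axis=0)                  # per-particle potential, as in epot()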
@pytest.fixture
def random_galaxy_params():
'\n Galaxy parameter for test.\n\n This returns a function that builds a dictionary with random params of a\n Galaxy object\n '
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {'m_s': m_s, 'x_s': x_s, 'y_s': y_s, 'z_s': z_s, 'vx_s': vx_s, 'vy_s': vy_s, 'vz_s': vz_s, 'm_dm': m_dm, 'x_dm': x_dm, 'y_dm': y_dm, 'z_dm': z_dm, 'vx_dm': vx_dm, 'vy_dm': vy_dm, 'vz_dm': vz_dm, 'm_g': m_g, 'x_g': x_g, 'y_g': y_g, 'z_g': z_g, 'vx_g': vx_g, 'vy_g': vy_g, 'vz_g': vz_g}
return params
return make | 8,858,762,264,248,654,000 | Galaxy parameter for test.
This returns a function that builds a dictionary with random params of a
Galaxy object | tests/conftest.py | random_galaxy_params | vcristiani/galaxy-chop | python | @pytest.fixture
def random_galaxy_params():
'\n Galaxy parameter for test.\n\n This returns a function that builds a dictionary with random params of a\n Galaxy object\n '
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {'m_s': m_s, 'x_s': x_s, 'y_s': y_s, 'z_s': z_s, 'vx_s': vx_s, 'vy_s': vy_s, 'vz_s': vz_s, 'm_dm': m_dm, 'x_dm': x_dm, 'y_dm': y_dm, 'z_dm': z_dm, 'vx_dm': vx_dm, 'vy_dm': vy_dm, 'vz_dm': vz_dm, 'm_g': m_g, 'x_g': x_g, 'y_g': y_g, 'z_g': z_g, 'vx_g': vx_g, 'vy_g': vy_g, 'vz_g': vz_g}
return params
return make |
@pytest.fixture(scope='session')
def solid_disk():
'\n Mock solid disk.\n\n Creates a mock solid disc of particles with masses\n and velocities.\n '
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (((rmax - rmin) * random.random_sample(size=N_part)) + rmin)
phi0 = ((2 * np.pi) * random.random_sample(size=N_part))
mass = (100000000.0 * np.ones_like(r))
x = (r * np.cos(phi0))
y = (r * np.sin(phi0))
z = ((1 * random.random_sample(size=N_part)) - 0.5)
xdot = ((((- 1) * omega) * r) * np.sin(phi0))
ydot = ((omega * r) * np.cos(phi0))
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return (mass, pos, vel)
return make | 663,195,190,636,283,000 | Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities. | tests/conftest.py | solid_disk | vcristiani/galaxy-chop | python | @pytest.fixture(scope='session')
def solid_disk():
'\n Mock solid disk.\n\n Creates a mock solid disc of particles with masses\n and velocities.\n '
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (((rmax - rmin) * random.random_sample(size=N_part)) + rmin)
phi0 = ((2 * np.pi) * random.random_sample(size=N_part))
mass = (100000000.0 * np.ones_like(r))
x = (r * np.cos(phi0))
y = (r * np.sin(phi0))
z = ((1 * random.random_sample(size=N_part)) - 0.5)
xdot = ((((- 1) * omega) * r) * np.sin(phi0))
ydot = ((omega * r) * np.cos(phi0))
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return (mass, pos, vel)
return make |
@pytest.fixture(scope='session')
def mock_dm_halo():
'\n Mock dark matter Halo.\n\n Creates a mock DM halo of particles with masses\n and positions.\n '
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = (random.random_sample(size=N_part) * rmax)
cos_t = ((random.random_sample(size=N_part) * 2.0) - 1)
phi0 = ((2 * np.pi) * random.random_sample(size=N_part))
sin_t = np.sqrt((1 - (cos_t ** 2)))
mass = (10000000000.0 * np.ones_like(r))
x = ((r * sin_t) * np.cos(phi0))
y = ((r * sin_t) * np.sin(phi0))
z = (r * cos_t)
pos = np.array([x, y, z]).T
return (mass, pos)
return make | 6,796,379,255,751,300,000 | Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and positions.
def mock_dm_halo():
'\n Mock dark matter Halo.\n\n Creates a mock DM halo of particles with masses\n and positions.\n '
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = (random.random_sample(size=N_part) * rmax)
cos_t = ((random.random_sample(size=N_part) * 2.0) - 1)
phi0 = ((2 * np.pi) * random.random_sample(size=N_part))
sin_t = np.sqrt((1 - (cos_t ** 2)))
mass = (10000000000.0 * np.ones_like(r))
x = ((r * sin_t) * np.cos(phi0))
y = ((r * sin_t) * np.sin(phi0))
z = (r * cos_t)
pos = np.array([x, y, z]).T
return (mass, pos)
return make |
@pytest.fixture
def disc_zero_angle(solid_disk):
'Disc with no angle of inclination.'
(mass, pos, vel) = solid_disk(N_part=1000)
return (mass, pos, vel) | 6,351,688,334,350,451,000 | Disc with no angle of inclination. | tests/conftest.py | disc_zero_angle | vcristiani/galaxy-chop | python | @pytest.fixture
def disc_zero_angle(solid_disk):
(mass, pos, vel) = solid_disk(N_part=1000)
return (mass, pos, vel) |
@pytest.fixture
def disc_xrotation(solid_disk):
'Disc rotated over x axis.'
(mass, pos, vel) = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=((0.3 * np.pi) * random.random()))
return (mass, (pos @ a), (vel @ a), a) | -2,466,450,466,322,028,500 | Disc rotated over x axis. | tests/conftest.py | disc_xrotation | vcristiani/galaxy-chop | python | @pytest.fixture
def disc_xrotation(solid_disk):
(mass, pos, vel) = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=((0.3 * np.pi) * random.random()))
return (mass, (pos @ a), (vel @ a), a) |
@pytest.fixture
def disc_yrotation(solid_disk):
'Disc rotated over y axis.'
(mass, pos, vel) = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=((0.3 * np.pi) * random.random()))
return (mass, (pos @ a), (vel @ a), a) | -5,700,432,447,759,388,000 | Disc rotated over y axis. | tests/conftest.py | disc_yrotation | vcristiani/galaxy-chop | python | @pytest.fixture
def disc_yrotation(solid_disk):
(mass, pos, vel) = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=((0.3 * np.pi) * random.random()))
return (mass, (pos @ a), (vel @ a), a) |
@pytest.fixture
def disc_zrotation(solid_disk):
'Disc rotated over z axis.'
(mass, pos, vel) = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=((0.3 * np.pi) * random.random()))
return (mass, (pos @ a), (vel @ a), a) | 2,917,622,923,905,925,600 | Disc rotated over z axis. | tests/conftest.py | disc_zrotation | vcristiani/galaxy-chop | python | @pytest.fixture
def disc_zrotation(solid_disk):
(mass, pos, vel) = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=((0.3 * np.pi) * random.random()))
return (mass, (pos @ a), (vel @ a), a) |
@pytest.fixture
def disc_particles(solid_disk):
'Solid disc without velocities.'
(mass, pos, vel) = solid_disk(N_part=100)
return (pos[:, 0], pos[:, 1], pos[:, 2], mass) | 7,966,807,864,763,880,000 | Solid disc without velocities. | tests/conftest.py | disc_particles | vcristiani/galaxy-chop | python | @pytest.fixture
def disc_particles(solid_disk):
(mass, pos, vel) = solid_disk(N_part=100)
return (pos[:, 0], pos[:, 1], pos[:, 2], mass) |
@pytest.fixture
def disc_particles_all(solid_disk):
'Solid disc with velocities.'
(mass_s, pos_s, vel_s) = solid_disk(N_part=100)
(mass_g, pos_g, vel_g) = solid_disk(N_part=100)
return (mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) | -115,016,087,222,377,730 | Solid disc with velocities. | tests/conftest.py | disc_particles_all | vcristiani/galaxy-chop | python | @pytest.fixture
def disc_particles_all(solid_disk):
(mass_s, pos_s, vel_s) = solid_disk(N_part=100)
(mass_g, pos_g, vel_g) = solid_disk(N_part=100)
return (mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) |
@pytest.fixture(scope='session')
def halo_particles(mock_dm_halo):
'Spherical mock halo.'
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
(mass_dm, pos_dm) = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return (mass_dm, pos_dm, vel_dm)
return make | -5,355,924,634,601,757,000 | Spherical mock halo. | tests/conftest.py | halo_particles | vcristiani/galaxy-chop | python | @pytest.fixture(scope='session')
def halo_particles(mock_dm_halo):
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
(mass_dm, pos_dm) = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return (mass_dm, pos_dm, vel_dm)
return make |
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
'Mock galaxy.'
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
(mass_dm, pos_dm, vel_dm) = halo_particles(N_part=100, seed=42)
g = core.Galaxy(m_s=(mass_s * u.M_sun), x_s=(pos_s[:, 0] * u.kpc), y_s=(pos_s[:, 1] * u.kpc), z_s=(pos_s[:, 2] * u.kpc), vx_s=(vel_s[:, 0] * (u.km / u.s)), vy_s=(vel_s[:, 1] * (u.km / u.s)), vz_s=(vel_s[:, 2] * (u.km / u.s)), m_dm=(mass_dm * u.M_sun), x_dm=(pos_dm[:, 0] * u.kpc), y_dm=(pos_dm[:, 1] * u.kpc), z_dm=(pos_dm[:, 2] * u.kpc), vx_dm=(vel_dm[:, 0] * (u.km / u.s)), vy_dm=(vel_dm[:, 1] * (u.km / u.s)), vz_dm=(vel_dm[:, 2] * (u.km / u.s)), m_g=(mass_g * u.M_sun), x_g=(pos_g[:, 0] * u.kpc), y_g=(pos_g[:, 1] * u.kpc), z_g=(pos_g[:, 2] * u.kpc), vx_g=(vel_g[:, 0] * (u.km / u.s)), vy_g=(vel_g[:, 1] * (u.km / u.s)), vz_g=(vel_g[:, 2] * (u.km / u.s)))
return g | -3,076,421,864,516,630,500 | Mock galaxy. | tests/conftest.py | mock_galaxy | vcristiani/galaxy-chop | python | @pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
(mass_dm, pos_dm, vel_dm) = halo_particles(N_part=100, seed=42)
g = core.Galaxy(m_s=(mass_s * u.M_sun), x_s=(pos_s[:, 0] * u.kpc), y_s=(pos_s[:, 1] * u.kpc), z_s=(pos_s[:, 2] * u.kpc), vx_s=(vel_s[:, 0] * (u.km / u.s)), vy_s=(vel_s[:, 1] * (u.km / u.s)), vz_s=(vel_s[:, 2] * (u.km / u.s)), m_dm=(mass_dm * u.M_sun), x_dm=(pos_dm[:, 0] * u.kpc), y_dm=(pos_dm[:, 1] * u.kpc), z_dm=(pos_dm[:, 2] * u.kpc), vx_dm=(vel_dm[:, 0] * (u.km / u.s)), vy_dm=(vel_dm[:, 1] * (u.km / u.s)), vz_dm=(vel_dm[:, 2] * (u.km / u.s)), m_g=(mass_g * u.M_sun), x_g=(pos_g[:, 0] * u.kpc), y_g=(pos_g[:, 1] * u.kpc), z_g=(pos_g[:, 2] * u.kpc), vx_g=(vel_g[:, 0] * (u.km / u.s)), vy_g=(vel_g[:, 1] * (u.km / u.s)), vz_g=(vel_g[:, 2] * (u.km / u.s)))
return g |
@pytest.fixture
def mock_real_galaxy():
'Mock real galaxy.'
dm = np.loadtxt((TEST_DATA_REAL_PATH / 'dark.dat'))
s = np.loadtxt((TEST_DATA_REAL_PATH / 'star.dat'))
g = np.loadtxt((TEST_DATA_REAL_PATH / 'gas_.dat'))
gal = core.Galaxy(m_s=((s[:, 0] * 10000000000.0) * u.M_sun), x_s=(s[:, 1] * u.kpc), y_s=(s[:, 2] * u.kpc), z_s=(s[:, 3] * u.kpc), vx_s=(s[:, 4] * (u.km / u.s)), vy_s=(s[:, 5] * (u.km / u.s)), vz_s=(s[:, 6] * (u.km / u.s)), m_dm=((dm[:, 0] * 10000000000.0) * u.M_sun), x_dm=(dm[:, 1] * u.kpc), y_dm=(dm[:, 2] * u.kpc), z_dm=(dm[:, 3] * u.kpc), vx_dm=(dm[:, 4] * (u.km / u.s)), vy_dm=(dm[:, 5] * (u.km / u.s)), vz_dm=(dm[:, 6] * (u.km / u.s)), m_g=((g[:, 0] * 10000000000.0) * u.M_sun), x_g=(g[:, 1] * u.kpc), y_g=(g[:, 2] * u.kpc), z_g=(g[:, 3] * u.kpc), vx_g=(g[:, 4] * (u.km / u.s)), vy_g=(g[:, 5] * (u.km / u.s)), vz_g=(g[:, 6] * (u.km / u.s)))
return gal | 888,360,333,462,419,000 | Mock real galaxy. | tests/conftest.py | mock_real_galaxy | vcristiani/galaxy-chop | python | @pytest.fixture
def mock_real_galaxy():
dm = np.loadtxt((TEST_DATA_REAL_PATH / 'dark.dat'))
s = np.loadtxt((TEST_DATA_REAL_PATH / 'star.dat'))
g = np.loadtxt((TEST_DATA_REAL_PATH / 'gas_.dat'))
gal = core.Galaxy(m_s=((s[:, 0] * 10000000000.0) * u.M_sun), x_s=(s[:, 1] * u.kpc), y_s=(s[:, 2] * u.kpc), z_s=(s[:, 3] * u.kpc), vx_s=(s[:, 4] * (u.km / u.s)), vy_s=(s[:, 5] * (u.km / u.s)), vz_s=(s[:, 6] * (u.km / u.s)), m_dm=((dm[:, 0] * 10000000000.0) * u.M_sun), x_dm=(dm[:, 1] * u.kpc), y_dm=(dm[:, 2] * u.kpc), z_dm=(dm[:, 3] * u.kpc), vx_dm=(dm[:, 4] * (u.km / u.s)), vy_dm=(dm[:, 5] * (u.km / u.s)), vz_dm=(dm[:, 6] * (u.km / u.s)), m_g=((g[:, 0] * 10000000000.0) * u.M_sun), x_g=(g[:, 1] * u.kpc), y_g=(g[:, 2] * u.kpc), z_g=(g[:, 3] * u.kpc), vx_g=(g[:, 4] * (u.km / u.s)), vy_g=(g[:, 5] * (u.km / u.s)), vz_g=(g[:, 6] * (u.km / u.s)))
return gal |
async def dial_peer(self, ip: str, port: int, peer_id: ID) -> None:
'\n Dial the peer ``peer_id`` through the IPv4 protocol\n '
(await self.host.connect(PeerInfo(peer_id=peer_id, addrs=[make_tcp_ip_maddr(ip, port)]))) | -9,166,332,256,899,670,000 | Dial the peer ``peer_id`` through the IPv4 protocol | trinity/protocol/bcc_libp2p/node.py | dial_peer | pipermerriam/trinity | python | async def dial_peer(self, ip: str, port: int, peer_id: ID) -> None:
'\n \n '
(await self.host.connect(PeerInfo(peer_id=peer_id, addrs=[make_tcp_ip_maddr(ip, port)]))) |
async def dial_peer_maddr(self, maddr: Multiaddr) -> None:
'\n Parse `maddr`, get the ip:port and PeerID, and call `dial_peer` with the parameters.\n '
ip = maddr.value_for_protocol(protocols.P_IP4)
port = maddr.value_for_protocol(protocols.P_TCP)
peer_id = ID.from_base58(maddr.value_for_protocol(protocols.P_P2P))
(await self.dial_peer(ip=ip, port=port, peer_id=peer_id)) | -9,147,664,730,211,368,000 | Parse `maddr`, get the ip:port and PeerID, and call `dial_peer` with the parameters. | trinity/protocol/bcc_libp2p/node.py | dial_peer_maddr | pipermerriam/trinity | python | async def dial_peer_maddr(self, maddr: Multiaddr) -> None:
'\n \n '
ip = maddr.value_for_protocol(protocols.P_IP4)
port = maddr.value_for_protocol(protocols.P_TCP)
peer_id = ID.from_base58(maddr.value_for_protocol(protocols.P_P2P))
(await self.dial_peer(ip=ip, port=port, peer_id=peer_id)) |
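A hypothetical call site for dial_peer_maddr; the multiaddr string and peer ID below are placeholders, and the call must run inside an event loop:

from multiaddr import Multiaddr

maddr = Multiaddr('/ip4/127.0.0.1/tcp/9000/p2p/QmPeerIdPlaceholder')
await node.dial_peer_maddr(maddr)   # extracts ip, port, and peer ID, then dials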
def pare(text, size, etc='...'):
"Pare text to have maximum size and add etc to the end if it's\n changed"
size = int(size)
text = text.strip()
if (len(text) > size):
to_be_stripped = (not whitespace_re.findall(text[(size - 1):(size + 2)]))
text = text[:size]
if to_be_stripped:
half = (size // 2)
last = None
for mo in whitespace_re.finditer(text[half:]):
last = mo
if (last is not None):
text = text[:((half + last.start()) + 1)]
return (text.rstrip() + etc)
else:
return text | -5,060,655,267,398,391,000 | Pare text to have maximum size and add etc to the end if it's
changed | iktomi/utils/text.py | pare | SlivTime/iktomi | python | def pare(text, size, etc='...'):
"Pare text to have maximum size and add etc to the end if it's\n changed"
size = int(size)
text = text.strip()
if (len(text) > size):
to_be_stripped = (not whitespace_re.findall(text[(size - 1):(size + 2)]))
text = text[:size]
if to_be_stripped:
half = (size // 2)
last = None
for mo in whitespace_re.finditer(text[half:]):
last = mo
if (last is not None):
text = text[:((half + last.start()) + 1)]
return (text.rstrip() + etc)
else:
return text |
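Tracing pare() with whitespace_re assumed to be a compiled r'\s+' pattern (which the logic implies) gives, for example:

pare('The quick brown fox', 10)   # -> 'The quick...'  (cut already falls beside a space)
pare('hi there wonderful', 12)    # -> 'hi there...'   (backtracks to the last space)
pare('short text', 50)            # -> 'short text'    (fits; returned unchanged)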
def ready(self):
'Override this to put in:\n Users system checks\n Users signal registration\n '
from .signals import user_signed_up | 5,655,973,104,684,093,000 | Override this to put in:
Users system checks
Users signal registration | nomadgram/users/apps.py | ready | JeewhanR/Nomadgram | python | def ready(self):
'Override this to put in:\n Users system checks\n Users signal registration\n '
from .signals import user_signed_up |
@staticmethod
def create(data, id, level, name, startTime, state, stderr, stdout):
'\n :type data: str\n :type id: str\n :type level: int\n :type name: EnumJobResultName\n :type startTime: int\n :type state: EnumJobResultState\n :type stderr: str\n :type stdout: str\n :rtype: JobResult\n '
return JobResult(data=data, id=id, level=level, name=name, startTime=startTime, state=state, stderr=stderr, stdout=stdout) | 7,922,843,436,683,396,000 | :type data: str
:type id: str
:type level: int
:type name: EnumJobResultName
:type startTime: int
:type state: EnumJobResultState
:type stderr: str
:type stdout: str
:rtype: JobResult | pyclient/zeroos/orchestrator/client/JobResult.py | create | 5l1v3r1/0-orchestrator | python | @staticmethod
def create(data, id, level, name, startTime, state, stderr, stdout):
'\n :type data: str\n :type id: str\n :type level: int\n :type name: EnumJobResultName\n :type startTime: int\n :type state: EnumJobResultState\n :type stderr: str\n :type stdout: str\n :rtype: JobResult\n '
return JobResult(data=data, id=id, level=level, name=name, startTime=startTime, state=state, stderr=stderr, stdout=stdout) |
def as_bytes(string):
" '<binary literal>' => b'<binary literal>' "
return string.encode('latin-1', 'strict') | -3,346,993,261,030,050,000 | '<binary literal>' => b'<binary literal>' | Python_MiniGame_Fighter/venv/Lib/site-packages/pygame/compat.py | as_bytes | JE-Chen/je_old_repo | python | def as_bytes(string):
" "
return string.encode('latin-1', 'strict') |
def as_unicode(rstring):
" r'<Unicode literal>' => '<Unicode literal>' "
return rstring.encode('ascii', 'strict').decode('unicode_escape', 'strict') | -7,175,447,703,092,290,000 | r'<Unicode literal>' => '<Unicode literal>' | Python_MiniGame_Fighter/venv/Lib/site-packages/pygame/compat.py | as_unicode | JE-Chen/je_old_repo | python | def as_unicode(rstring):
" "
return rstring.encode('ascii', 'strict').decode('unicode_escape', 'strict') |
def as_bytes(string):
" '<binary literal>' => '<binary literal>' "
return string | -3,700,333,371,179,972,000 | '<binary literal>' => '<binary literal>' | Python_MiniGame_Fighter/venv/Lib/site-packages/pygame/compat.py | as_bytes | JE-Chen/je_old_repo | python | def as_bytes(string):
" "
return string |
def as_unicode(rstring):
" r'<Unicode literal>' => u'<Unicode literal>' "
return rstring.decode('unicode_escape', 'strict') | -854,986,504,844,931,500 | r'<Unicode literal>' => u'<Unicode literal>' | Python_MiniGame_Fighter/venv/Lib/site-packages/pygame/compat.py | as_unicode | JE-Chen/je_old_repo | python | def as_unicode(rstring):
" "
return rstring.decode('unicode_escape', 'strict') |
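Together the two pairs above give one text/bytes API across Python 2 and 3. On Python 3 the round trip looks like:

b = as_bytes('caf\xe9')       # b'caf\xe9' -- latin-1 encoded bytes
u = as_unicode(r'caf\xe9')    # 'café' -- the escape sequence is decoded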
def __init__(self, device):
'Initialize the Axis event.'
self.device = device
self._attr_device_info = DeviceInfo(identifiers={(AXIS_DOMAIN, device.unique_id)}) | 6,023,322,532,559,210,000 | Initialize the Axis event. | homeassistant/components/axis/axis_base.py | __init__ | 2004happy/core | python | def __init__(self, device):
self.device = device
self._attr_device_info = DeviceInfo(identifiers={(AXIS_DOMAIN, device.unique_id)}) |
async def async_added_to_hass(self):
'Subscribe device events.'
self.async_on_remove(async_dispatcher_connect(self.hass, self.device.signal_reachable, self.update_callback)) | -7,894,750,115,983,759,000 | Subscribe device events. | homeassistant/components/axis/axis_base.py | async_added_to_hass | 2004happy/core | python | async def async_added_to_hass(self):
self.async_on_remove(async_dispatcher_connect(self.hass, self.device.signal_reachable, self.update_callback)) |
@property
def available(self):
'Return True if device is available.'
return self.device.available | -5,505,775,743,180,649,000 | Return True if device is available. | homeassistant/components/axis/axis_base.py | available | 2004happy/core | python | @property
def available(self):
return self.device.available |
@callback
def update_callback(self, no_delay=None):
'Update the entities state.'
self.async_write_ha_state() | -4,252,368,256,177,257,500 | Update the entities state. | homeassistant/components/axis/axis_base.py | update_callback | 2004happy/core | python | @callback
def update_callback(self, no_delay=None):
self.async_write_ha_state() |