query            string (lengths 9 to 9.05k)
document         string (lengths 10 to 222k)
metadata         dict
negatives        list (length 30)
negative_scores  list (length 30)
document_score   string (lengths 4 to 10)
document_rank    string (2 classes)
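The schema above describes one retrieval-training row per record. As a rough sketch only, such rows could be iterated with the Hugging Face datasets library; the repository id "org/code-retrieval-triplets" below is a placeholder, not the real dataset name.

from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

for row in ds.select(range(2)):
    print(row["query"][:80])        # natural-language docstring
    print(row["document"][:80])     # the matching source snippet
    print(len(row["negatives"]))    # 30 hard negatives per row
    print(row["document_score"], row["document_rank"])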
Selects upload/download transfer handlers and callback handlers. We use a callback handler that shows a simple textual progress indicator if file_size is at or above the configurable threshold. We use a resumable transfer handler if file_size is at or above the configurable threshold and resumable transfers are supported by the given provider. boto supports resumable downloads for all providers, but resumable uploads are currently only supported by GS.
def GetTransferHandlers(self, uri, key, file_size, upload):
  config = boto.config
  resumable_threshold = config.getint('GSUtil', 'resumable_threshold', ONE_MB)
  if file_size >= resumable_threshold:
    cb = self.FileCopyCallbackHandler(upload).call
    num_cb = int(file_size / ONE_MB)
    resumable_tracker_dir = config.get('GSUtil', 'resumable_tracker_dir',
                                       '%s/.gsutil' % os.environ['HOME'])
    if not os.path.exists(resumable_tracker_dir):
      os.makedirs(resumable_tracker_dir)
    if upload:
      # Encode the src bucket and key into the tracker file name.
      res_tracker_file_name = (
          re.sub('[/\\\\]', '_',
                 'resumable_upload__%s__%s.url' %
                 (key.bucket.name, key.name)))
    else:
      # Encode the fully-qualified src file name into the tracker file name.
      res_tracker_file_name = (
          re.sub('[/\\\\]', '_',
                 'resumable_download__%s.etag' %
                 (os.path.realpath(uri.object_name))))
    tracker_file = '%s%s%s' % (resumable_tracker_dir, os.sep,
                               res_tracker_file_name)
    if upload:
      if uri.scheme == 'gs':
        transfer_handler = ResumableUploadHandler(tracker_file)
      else:
        transfer_handler = None
    else:
      transfer_handler = ResumableDownloadHandler(tracker_file)
  else:
    transfer_handler = None
    cb = None
    num_cb = None
  return (cb, num_cb, transfer_handler)
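For context, a minimal sketch of how the returned tuple might be consumed on the download side, assuming boto's Key.get_contents_to_file accepts cb, num_cb, and res_download_handler arguments; src_uri, src_key, and fp are illustrative names, not from the original source.

# Minimal usage sketch (assumed names; not part of the original code).
(cb, num_cb, res_download_handler) = self.GetTransferHandlers(
    src_uri, src_key, file_size, False)
# Pass the progress callback and resumable handler straight through to boto;
# if the file was below the threshold all three values are None and boto
# performs a plain, non-resumable download with no progress output.
src_key.get_contents_to_file(fp, headers=headers, cb=cb, num_cb=num_cb,
                             res_download_handler=res_download_handler)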
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_size_sig_handler(self, file_size: int):\n # Verify the maximum has been set for the progress bar\n if self.fileAnalyzeProgressBar.maximum() != file_size:\n self.fileAnalyzeProgressBar.setMaximum(file_size)\n\n logging.debug(\"Set File Size: \" + str(file_size))", "def upload_progress(self, cloud_file, size, uploaded):", "def file_progress_event_handler(self, sender, bytes_read: int, total_size: int, file_name: str):\n self.sig_update_file_size.emit(total_size)\n self.sig_update_file_progress.emit(bytes_read)", "def download_progress(self, cloud_file, size, downloaded):", "def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,\n validate=True, log_to=None, metadata=None, tags=None):\n if threads is None:\n threads = 1\n\n filenames = []\n file_sizes = []\n for file_path in files:\n normalized_filename, file_size = _file_stats(file_path, validate=validate)\n filenames.append(normalized_filename)\n file_sizes.append(file_size)\n\n # set up the logging\n bar_length = 20\n if log_to is not None:\n log_to.write('Uploading: Preparing upload(s)... ')\n log_to.flush()\n\n overall_size = sum(file_sizes)\n validated_sizes = {filename: 0 for filename in filenames}\n transferred_sizes = {filename: 0 for filename in filenames}\n\n # TODO: we should use click.progressbar?\n def progress_bar_display(file_id, bytes_transferred, validation=False):\n validation_in_progress = sum(validated_sizes.values()) != overall_size\n if validation and validation_in_progress:\n # Validating mode\n prev_progress = sum(validated_sizes.values()) / overall_size\n validated_sizes[file_id] = bytes_transferred\n progress = sum(validated_sizes.values()) / overall_size\n else:\n # Uploading mode\n prev_progress = sum(transferred_sizes.values()) / overall_size\n transferred_sizes[file_id] = bytes_transferred\n progress = sum(transferred_sizes.values()) / overall_size\n\n if floor(100 * prev_progress) == floor(100 * progress):\n return\n\n block = int(round(bar_length * progress))\n bar = '#' * block + '-' * (bar_length - block)\n if validation and validation_in_progress:\n log_to.write('\\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))\n elif progress != 1:\n log_to.write('\\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))\n else:\n log_to.write('\\rUploading: Finalizing upload... 
')\n log_to.flush()\n\n progress_bar = None if log_to is None else progress_bar_display\n\n # first, upload all the smaller files in parallel (if multiple threads are requested)\n uploading_uuids = []\n if threads > 1:\n import ctypes\n thread_error = Value(ctypes.c_wchar_p, '')\n semaphore = BoundedSemaphore(threads)\n upload_threads = []\n\n def threaded_upload(*args):\n def _wrapped(*wrapped_args):\n semaphore.acquire()\n try:\n file_uuid = upload_file(*wrapped_args[:-1])\n if file_uuid:\n uploading_uuids.append(file_uuid)\n except Exception as e:\n # handle inside the thread to prevent the exception message from leaking out\n wrapped_args[-1].value = '{}'.format(e)\n raise SystemExit\n semaphore.release()\n\n # the thread error message must be the last parameter\n thread = Thread(target=_wrapped, args=args + (thread_error, ))\n thread.daemon = True\n thread.start()\n upload_threads.append(thread)\n else:\n threaded_upload = upload_file\n\n upload_threads = []\n uploading_files = []\n for file_path, filename, file_size in zip(files, filenames, file_sizes):\n if file_size < MULTIPART_SIZE:\n file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)\n file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,\n metadata, tags)\n if file_uuid:\n uploading_uuids.append(file_uuid)\n uploading_files.append(file_obj)\n\n if threads > 1:\n # we need to do this funky wait loop to ensure threads get killed by ctrl-c\n while True:\n for thread in upload_threads:\n # hopefully no one has a <5Gb file that takes longer than a week to upload\n thread.join(604800)\n if all(not thread.is_alive() for thread in upload_threads):\n break\n if thread_error.value != '':\n raise UploadException(thread_error.value)\n\n # lastly, upload all the very big files sequentially\n for file_path, filename, file_size in zip(files, filenames, file_sizes):\n if file_size >= MULTIPART_SIZE:\n file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)\n upload_large_file(file_obj, filename, session, samples_resource, server_url,\n threads=threads, log_to=log_to)\n file_obj.close()\n\n if log_to is not None:\n log_to.write('\\rUploading: All complete.' 
+ (bar_length - 3) * ' ' + '\\n')\n log_to.flush()\n\n return uploading_uuids", "def too_many_requests_handler(self, too_many_requests_handler: Callable[[Response], float]):\n self._too_many_requests_handler = too_many_requests_handler", "def file_progress_sig_handler(self, bytes_read: int):\n # Increment the bytes read\n self.file_bytes_read += bytes_read\n\n # Update the progress bar\n self.fileAnalyzeProgressBar.setValue(self.file_bytes_read)\n\n logging.debug(\"Analyzing File Progress: \" + str(self.file_bytes_read))", "def upload_file(\n storage_client: Union[\"BlobStorageClient\", \"Gen2StorageClient\"],\n source: str,\n dest: Optional[str] = None,\n msg: Optional[str] = None,\n size: int = 0,\n show_progress: Optional[bool] = None,\n in_directory: bool = False,\n callback: Optional[Any] = None,\n) -> None:\n validate_content = size > 0 # don't do checksum for empty files\n\n if (\n type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME\n ): # Only for Gen2StorageClient, Blob Storage doesn't have true directories\n if in_directory:\n storage_client.temp_sub_directory_client = None\n file_name_tail = dest.split(os.path.sep)[-1]\n # Indexing from 2 because the first two parts of the remote path will always be LocalUpload/<asset_id>\n all_sub_folders = dest.split(os.path.sep)[2:-1]\n\n # Create remote directories for each nested directory if file is in a nested directory\n for sub_folder in all_sub_folders:\n if storage_client.temp_sub_directory_client:\n storage_client.temp_sub_directory_client = (\n storage_client.temp_sub_directory_client.create_sub_directory(sub_folder)\n )\n else:\n storage_client.temp_sub_directory_client = storage_client.directory_client.create_sub_directory(\n sub_folder\n )\n\n storage_client.file_client = storage_client.temp_sub_directory_client.create_file(file_name_tail)\n else:\n storage_client.file_client = storage_client.directory_client.create_file(source.split(\"/\")[-1])\n\n with open(source, \"rb\") as data:\n if show_progress and not in_directory:\n file_size, _ = get_directory_size(source)\n file_size_in_mb = file_size / 10**6\n if file_size_in_mb < 1:\n msg += Fore.GREEN + \" (< 1 MB)\"\n else:\n msg += Fore.GREEN + f\" ({round(file_size_in_mb, 2)} MBs)\"\n cntx_manager = FileUploadProgressBar(msg=msg)\n else:\n cntx_manager = suppress()\n\n with cntx_manager as c:\n callback = c.update_to if (show_progress and not in_directory) else None\n if type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME:\n storage_client.file_client.upload_data(\n data=data.read(),\n overwrite=True,\n validate_content=validate_content,\n raw_response_hook=callback,\n max_concurrency=MAX_CONCURRENCY,\n )\n elif type(storage_client).__name__ == BLOB_STORAGE_CLIENT_NAME:\n storage_client.container_client.upload_blob(\n name=dest,\n data=data,\n validate_content=validate_content,\n overwrite=storage_client.overwrite,\n raw_response_hook=callback,\n max_concurrency=MAX_CONCURRENCY,\n connection_timeout=DEFAULT_CONNECTION_TIMEOUT,\n )\n\n storage_client.uploaded_file_count += 1", "def select_file_upload_method():\n\n if not Settings.prompt(\"upload files\"): \n return \"unset\"\n Settings.print(\"Select an upload source\")\n sources = Settings.get_source_options()\n question = {\n 'type': 'list',\n 'name': 'upload',\n 'message': 'Upload:',\n 'choices': [src.title() for src in sources]\n }\n upload = PyInquirer.prompt(question)[\"upload\"]\n\n\n # everything after this part should be in another function\n # this should just return the string of the upload source\n\n\n if 
str(upload) == \"Local\":\n return File.select_files()\n elif str(upload) == \"Google\":\n return Google_File.select_files()\n # elif str(upload) == \"Dropbox\":\n # return Dropbox.select_files()\n elif str(upload) == \"Remote\":\n return Remote.select_files()\n return File.select_files()", "def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,\r\n num_cb):\r\n (server_start, server_end) = self.SERVER_HAS_NOTHING\r\n conn = key.bucket.connection\r\n if self.tracker_uri:\r\n # Try to resume existing resumable upload.\r\n try:\r\n (server_start, server_end) = (\r\n self._query_server_pos(conn, file_length))\r\n self.server_has_bytes = server_start\r\n key=key\r\n if conn.debug >= 1:\r\n print 'Resuming transfer.'\r\n except ResumableUploadException, e:\r\n if conn.debug >= 1:\r\n print 'Unable to resume transfer (%s).' % e.message\r\n self._start_new_resumable_upload(key, headers)\r\n else:\r\n self._start_new_resumable_upload(key, headers)\r\n\r\n # upload_start_point allows the code that instantiated the\r\n # ResumableUploadHandler to find out the point from which it started\r\n # uploading (e.g., so it can correctly compute throughput).\r\n if self.upload_start_point is None:\r\n self.upload_start_point = server_end\r\n\r\n if server_end == file_length:\r\n # Boundary condition: complete file was already uploaded (e.g.,\r\n # user interrupted a previous upload attempt after the upload\r\n # completed but before the gsutil tracker file was deleted). Set\r\n # total_bytes_uploaded to server_end so we'll attempt to upload\r\n # no more bytes but will still make final HTTP request and get\r\n # back the response (which contains the etag we need to compare\r\n # at the end).\r\n total_bytes_uploaded = server_end\r\n else:\r\n total_bytes_uploaded = server_end + 1\r\n fp.seek(total_bytes_uploaded)\r\n conn = key.bucket.connection\r\n\r\n # Get a new HTTP connection (vs conn.get_http_connection(), which reuses\r\n # pool connections) because httplib requires a new HTTP connection per\r\n # transaction. (Without this, calling http_conn.getresponse() would get\r\n # \"ResponseNotReady\".)\r\n http_conn = conn.new_http_connection(self.tracker_uri_host,\r\n conn.is_secure)\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n # Make sure to close http_conn at end so if a local file read\r\n # failure occurs partway through server will terminate current upload\r\n # and can report that progress on next attempt.\r\n try:\r\n return self._upload_file_bytes(conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb)\r\n except (ResumableUploadException, socket.error):\r\n resp = self._query_server_state(conn, file_length)\r\n if resp.status == 400:\r\n raise ResumableUploadException('Got 400 response from server '\r\n 'state query after failed resumable upload attempt. 
This '\r\n 'can happen if the file size changed between upload '\r\n 'attempts', ResumableTransferDisposition.ABORT)\r\n else:\r\n raise\r\n finally:\r\n http_conn.close()", "def download_progress_hook(count, blockSize, totalSize):\n percent = int(count * blockSize * 100 / totalSize)\n\n global last_percent_reported\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n last_percent_reported = percent", "def too_many_requests_handler(self) -> Callable[[Response], float]:\n if not self._too_many_requests_handler:\n self._too_many_requests_handler = default_too_many_requests_handler\n return self._too_many_requests_handler", "def download_progress_hook(count, blockSize, totalSize):\n global last_percent_reported\n percent = int(count * blockSize * 100 / totalSize)\n\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n last_percent_reported = percent", "def download_progress_callback(block_num, block_size, expected_size):\n total_blocks = int(math.ceil(expected_size / block_size))\n progress_increment = int(math.ceil(total_blocks / 100))\n\n if block_num % progress_increment == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n if block_num * block_size >= expected_size:\n print(\"\")", "def do_process_user_file_chunks(\n page_size: int, error_handler: ErrorHandler, position: int, participant: Participant\n):\n \n # FIXME: this is a gross hack to force some time related safety, which is only ever used deep\n # inside of data processing.\n common_constants.LATEST_POSSIBLE_DATA_TIMESTAMP = \\\n int(time.mktime((timezone.now() + timedelta(days=90)).timetuple()))\n \n # Declare a defaultdict of a tuple of 2 lists\n all_binified_data = defaultdict(lambda: ([], []))\n ftps_to_remove = set()\n # The ThreadPool enables downloading multiple files simultaneously from the network, and continuing\n # to download files as other files are being processed, making the code as a whole run faster.\n # In principle we could make a global pool that is free-memory aware.\n pool = ThreadPool(CONCURRENT_NETWORK_OPS)\n survey_id_dict = {}\n \n # A Django query with a slice (e.g. .all()[x:y]) makes a LIMIT query, so it\n # only gets from the database those FTPs that are in the slice.\n # print(participant.as_dict())\n print(\"Number Files To Process:\", participant.files_to_process.exclude(deleted=True).count())\n print(f\"will process {page_size} files.\")\n print(\"current count processing within this run:\", position)\n \n # TODO: investigate, comment. ordering by path results in files grouped by type and\n # chronological order, which is perfect for download efficiency... right? would it break anthing?\n files_to_process = participant.files_to_process \\\n .exclude(deleted=True) #.order_by(\"s3_file_path\", \"created_on\")\n \n # This pool pulls in data for each FileForProcessing on a background thread and instantiates it.\n # Instantiating a FileForProcessing object queries S3 for the File's data. 
(network request))\n files_for_processing = pool.map(\n FileForProcessing, files_to_process[position: position + page_size], chunksize=1\n )\n \n for file_for_processing in files_for_processing:\n with error_handler:\n process_one_file(\n file_for_processing, survey_id_dict, all_binified_data, ftps_to_remove\n )\n pool.close()\n pool.terminate()\n \n # there are several failure modes and success modes, information for what to do with different\n # files percolates back to here. Delete various database objects accordingly.\n more_ftps_to_remove, number_bad_files, earliest_time_bin, latest_time_bin = upload_binified_data(\n all_binified_data, error_handler, survey_id_dict, participant\n )\n ftps_to_remove.update(more_ftps_to_remove)\n \n # Update the data quantity stats, if it actually processed any files\n if len(files_to_process) > 0:\n calculate_data_quantity_stats(participant,\n earliest_time_bin_number=earliest_time_bin,\n latest_time_bin_number=latest_time_bin)\n \n # Actually delete the processed FTPs from the database\n FileToProcess.objects.filter(pk__in=ftps_to_remove).delete()\n return number_bad_files", "def btn_upload_callback(self):\n # Create File Select Dialog\n dialog = QFileDialog(parent=self, caption='Images')\n dialog.setMimeTypeFilters(\n [\"image/jpeg\", \"image/png\", \"image/tiff\", 'application/zip'])\n dialog.setFileMode(QFileDialog.ExistingFile)\n\n if dialog.exec_() == QDialog.Accepted:\n\n filename = dialog.selectedFiles()[0]\n\n with open(filename, 'rb') as f:\n file_b64s = fio_to_b64s(f)\n\n if ext_from_path(filename) == '.zip':\n ret = api.upload_zip(\n file_b64s,\n nameext_from_path(filename),\n self.user_hash\n )\n else:\n ret = api.upload_image(\n file_b64s,\n nameext_from_path(filename),\n self.user_hash\n )\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n self.update_table()", "def PerformResumableUploadIfApplies(self, fp, dst_uri, headers, canned_acl):\n start_time = time.time()\n file_size = os.path.getsize(fp.name)\n dst_key = dst_uri.new_key(False, headers)\n (cb, num_cb, res_upload_handler) = self.GetTransferHandlers(\n dst_uri, dst_key, file_size, True)\n if dst_uri.scheme == 'gs':\n # Resumable upload protocol is Google Storage-specific.\n dst_key.set_contents_from_file(fp, headers=headers, policy=canned_acl,\n cb=cb, num_cb=num_cb,\n res_upload_handler=res_upload_handler)\n else:\n dst_key.set_contents_from_file(fp, headers=headers, policy=canned_acl,\n cb=cb, num_cb=num_cb)\n if res_upload_handler:\n bytes_transferred = file_size - res_upload_handler.upload_start_point\n else:\n bytes_transferred = file_size\n end_time = time.time()\n return (end_time - start_time, bytes_transferred)", "def progress_callback(self, func):\n self.curl.setopt(pycurl.PROGRESSFUNCTION, func)", "def _progress_enabled_zip_write_handler(\n zip_file_handler: zipfile.ZipFile, progress_bar: tqdm.tqdm\n) -> Iterator[zipfile.ZipFile]:\n\n def _write_with_progress(\n original_write_fct, self, data, pbar # pylint: disable=unused-argument\n ):\n pbar.update(len(data))\n return original_write_fct(data)\n\n # Replace original write() with a wrapper to track progress\n assert zip_file_handler.fp # nosec\n old_write_method = zip_file_handler.fp.write\n zip_file_handler.fp.write = types.MethodType( # type: ignore[assignment]\n partial(_write_with_progress, old_write_method, pbar=progress_bar),\n zip_file_handler.fp,\n )\n try:\n yield zip_file_handler\n finally:\n zip_file_handler.fp.write = old_write_method # type: ignore[method-assign]", "def 
_config_filehandler(self, **kwargs):\n tool = FileHandlerFactory(self.filehandler_type, **kwargs)\n self._tools.append(tool.to_msg())", "def send_file(self, key, fp, headers, cb=None, num_cb=10):\r\n\r\n if not headers:\r\n headers = {}\r\n\r\n fp.seek(0, os.SEEK_END)\r\n file_length = fp.tell()\r\n fp.seek(0)\r\n debug = key.bucket.connection.debug\r\n\r\n # Use num-retries from constructor if one was provided; else check\r\n # for a value specified in the boto config file; else default to 5.\r\n if self.num_retries is None:\r\n self.num_retries = config.getint('Boto', 'num_retries', 5)\r\n progress_less_iterations = 0\r\n\r\n while True: # Retry as long as we're making progress.\r\n server_had_bytes_before_attempt = self.server_has_bytes\r\n try:\r\n etag = self._attempt_resumable_upload(key, fp, file_length,\r\n headers, cb, num_cb)\r\n # Upload succceded, so remove the tracker file (if have one).\r\n self._remove_tracker_file()\r\n self._check_final_md5(key, etag)\r\n if debug >= 1:\r\n print 'Resumable upload complete.'\r\n return\r\n except self.RETRYABLE_EXCEPTIONS, e:\r\n if debug >= 1:\r\n print('Caught exception (%s)' % e.__repr__())\r\n if isinstance(e, IOError) and e.errno == errno.EPIPE:\r\n # Broken pipe error causes httplib to immediately\r\n # close the socket (http://bugs.python.org/issue5542),\r\n # so we need to close the connection before we resume\r\n # the upload (which will cause a new connection to be\r\n # opened the next time an HTTP request is sent).\r\n key.bucket.connection.connection.close()\r\n except ResumableUploadException, e:\r\n if (e.disposition ==\r\n ResumableTransferDisposition.ABORT_CUR_PROCESS):\r\n if debug >= 1:\r\n print('Caught non-retryable ResumableUploadException '\r\n '(%s); aborting but retaining tracker file' %\r\n e.message)\r\n raise\r\n elif (e.disposition ==\r\n ResumableTransferDisposition.ABORT):\r\n if debug >= 1:\r\n print('Caught non-retryable ResumableUploadException '\r\n '(%s); aborting and removing tracker file' %\r\n e.message)\r\n self._remove_tracker_file()\r\n raise\r\n else:\r\n if debug >= 1:\r\n print('Caught ResumableUploadException (%s) - will '\r\n 'retry' % e.message)\r\n\r\n # At this point we had a re-tryable failure; see if made progress.\r\n if self.server_has_bytes > server_had_bytes_before_attempt:\r\n progress_less_iterations = 0\r\n else:\r\n progress_less_iterations += 1\r\n\r\n if progress_less_iterations > self.num_retries:\r\n # Don't retry any longer in the current process.\r\n raise ResumableUploadException(\r\n 'Too many resumable upload attempts failed without '\r\n 'progress. 
You might try this upload again later',\r\n ResumableTransferDisposition.ABORT_CUR_PROCESS)\r\n\r\n # Use binary exponential backoff to desynchronize client requests\r\n sleep_time_secs = random.random() * (2**progress_less_iterations)\r\n if debug >= 1:\r\n print ('Got retryable failure (%d progress-less in a row).\\n'\r\n 'Sleeping %3.1f seconds before re-trying' %\r\n (progress_less_iterations, sleep_time_secs))\r\n time.sleep(sleep_time_secs)", "def upload(request):\n if request.method != \"POST\":\n return probe(request)\n\n md5chunk = request.args.get('md5chunk', False)\n md5total = request.args.get('md5total', False)\n\n chunk = int(request.args.get('chunk', 0))\n chunks = int(request.args.get('chunks', 0))\n\n if md5chunk and md5total:\n filename = upload_with_checksum(request, md5chunk, md5total, chunk, chunks)\n else:\n filename = upload_simple(request, chunk)\n\n return Response('%s uploaded' % filename)", "def cb(self, complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Upload completion: {0}%\".format(percent))", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def cb(self, complete, total):\n \"\"\"Swift client does not support callbak\"\"\"\n percent = int(complete * 100.0 / total)\n log.info(\"Upload completion: {0}%\".format(percent))", "def download_callback_verifier(num_bytes, total_bytes):\n if num_bytes > 0:\n msg = \"Verified Read access, with partial download {}/{} MiB\".format(num_bytes / 1.e6, total_bytes / 1.e6)\n raise VerificationComplete(msg)", "def upload_lambda_functions(context: CfnginContext, provider: Provider, **kwargs: Any):\n LOGGER.warning(\n \"%s is deprecated and will be removed in a future release - \"\n \"see documentation for replacement\",\n __name__,\n )\n # TODO add better handling for misconfiguration (e.g. forgetting function names)\n # TODO support defining dockerize_pip options at the top level of args\n custom_bucket = cast(str, kwargs.get(\"bucket\", \"\"))\n if not custom_bucket:\n if not context.bucket_name:\n raise ValueError(\"hook requires bucket argument or top-level cfngin_hook\")\n bucket_name = context.bucket_name\n LOGGER.info(\"using default bucket from CFNgin: %s\", bucket_name)\n else:\n bucket_name = custom_bucket\n LOGGER.info(\"using custom bucket: %s\", bucket_name)\n\n custom_bucket_region = cast(str, kwargs.get(\"bucket_region\", \"\"))\n if not custom_bucket and custom_bucket_region:\n raise ValueError(\"Cannot specify `bucket_region` without specifying `bucket`.\")\n\n bucket_region = select_bucket_region(\n custom_bucket,\n custom_bucket_region,\n context.config.cfngin_bucket_region,\n provider.region or \"us-east-1\",\n )\n\n # Check if we should walk / follow symlinks\n follow_symlinks = kwargs.get(\"follow_symlinks\", False)\n if not isinstance(follow_symlinks, bool):\n raise ValueError(\"follow_symlinks option must be a boolean\")\n\n # Check for S3 object acl. 
Valid values from:\n # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n payload_acl = cast(\"ObjectCannedACLType\", kwargs.get(\"payload_acl\", \"private\"))\n\n # Always use the global client for s3\n session = context.get_session(region=bucket_region)\n s3_client = session.client(\"s3\")\n\n ensure_s3_bucket(s3_client, bucket_name, bucket_region)\n\n prefix = kwargs.get(\"prefix\", \"\")\n\n results: Dict[str, Any] = {}\n for name, options in kwargs[\"functions\"].items():\n sys_path = (\n os.path.dirname(context.config_path)\n if os.path.isfile(context.config_path)\n else context.config_path\n )\n results[name] = _upload_function(\n s3_client,\n bucket_name,\n prefix,\n name,\n options,\n follow_symlinks,\n payload_acl,\n str(sys_path),\n work_dir=context.work_dir,\n )\n\n return results", "def send_file(self, fp, headers=None, cb=None, num_cb=10,\r\n query_args=None, chunked_transfer=False):\r\n provider = self.bucket.connection.provider\r\n\r\n def sender(http_conn, method, path, data, headers):\r\n http_conn.putrequest(method, path)\r\n for key in headers:\r\n http_conn.putheader(key, headers[key])\r\n http_conn.endheaders()\r\n if chunked_transfer:\r\n # MD5 for the stream has to be calculated on the fly, as\r\n # we don't know the size of the stream before hand.\r\n m = md5()\r\n else:\r\n fp.seek(0)\r\n\r\n save_debug = self.bucket.connection.debug\r\n self.bucket.connection.debug = 0\r\n # If the debuglevel < 3 we don't want to show connection\r\n # payload, so turn off HTTP connection-level debug output (to\r\n # be restored below).\r\n # Use the getattr approach to allow this to work in AppEngine.\r\n if getattr(http_conn, 'debuglevel', 0) < 3:\r\n http_conn.set_debuglevel(0)\r\n if cb:\r\n if chunked_transfer:\r\n # For chunked Transfer, we call the cb for every 1MB\r\n # of data transferred.\r\n cb_count = (1024 * 1024)/self.BufferSize\r\n self.size = 0\r\n elif num_cb > 2:\r\n cb_count = self.size / self.BufferSize / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = total_bytes = 0\r\n cb(total_bytes, self.size)\r\n l = fp.read(self.BufferSize)\r\n while len(l) > 0:\r\n if chunked_transfer:\r\n http_conn.send('%x;\\r\\n' % len(l))\r\n http_conn.send(l)\r\n http_conn.send('\\r\\n')\r\n else:\r\n http_conn.send(l)\r\n if cb:\r\n total_bytes += len(l)\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes, self.size)\r\n i = 0\r\n if chunked_transfer:\r\n m.update(l)\r\n l = fp.read(self.BufferSize)\r\n if chunked_transfer:\r\n http_conn.send('0\\r\\n')\r\n http_conn.send('\\r\\n')\r\n if cb:\r\n self.size = total_bytes\r\n # Get the md5 which is calculated on the fly.\r\n self.md5 = m.hexdigest()\r\n else:\r\n fp.seek(0)\r\n if cb:\r\n cb(total_bytes, self.size)\r\n response = http_conn.getresponse()\r\n body = response.read()\r\n http_conn.set_debuglevel(save_debug)\r\n self.bucket.connection.debug = save_debug\r\n if ((response.status == 500 or response.status == 503 or\r\n response.getheader('location')) and not chunked_transfer):\r\n # we'll try again.\r\n return response\r\n elif response.status >= 200 and response.status <= 299:\r\n self.etag = response.getheader('etag')\r\n if self.etag != '\"%s\"' % self.md5:\r\n raise provider.storage_data_error(\r\n 'ETag from S3 did not match computed MD5')\r\n return response\r\n else:\r\n raise provider.storage_response_error(\r\n response.status, response.reason, body)\r\n\r\n if not headers:\r\n headers = {}\r\n else:\r\n headers = headers.copy()\r\n 
headers['User-Agent'] = UserAgent\r\n if self.base64md5:\r\n headers['Content-MD5'] = self.base64md5\r\n if self.storage_class != 'STANDARD':\r\n headers[provider.storage_class_header] = self.storage_class\r\n if headers.has_key('Content-Encoding'):\r\n self.content_encoding = headers['Content-Encoding']\r\n if headers.has_key('Content-Type'):\r\n self.content_type = headers['Content-Type']\r\n elif self.path:\r\n self.content_type = mimetypes.guess_type(self.path)[0]\r\n if self.content_type == None:\r\n self.content_type = self.DefaultContentType\r\n headers['Content-Type'] = self.content_type\r\n else:\r\n headers['Content-Type'] = self.content_type\r\n if not chunked_transfer:\r\n headers['Content-Length'] = str(self.size)\r\n headers['Expect'] = '100-Continue'\r\n headers = boto.utils.merge_meta(headers, self.metadata, provider)\r\n resp = self.bucket.connection.make_request('PUT', self.bucket.name,\r\n self.name, headers,\r\n sender=sender,\r\n query_args=query_args)\r\n self.handle_version_headers(resp, force=True)", "def rate_limit_handler(self, rate_limit_handler: RateLimitHandler):\n self._rate_limit_handler = rate_limit_handler\n if self._custom_adapter:\n self._custom_adapter.rate_limit_handler = rate_limit_handler" ]
[ "0.544197", "0.51174533", "0.5087956", "0.49688822", "0.48351347", "0.48325843", "0.47526792", "0.47526148", "0.4721983", "0.4708315", "0.47005746", "0.46767825", "0.4652243", "0.46501178", "0.46301877", "0.46250758", "0.46141136", "0.45568892", "0.45161462", "0.45094308", "0.4503601", "0.44819698", "0.4468478", "0.4434832", "0.4415924", "0.44128138", "0.44120175", "0.43873197", "0.43864205", "0.436398" ]
0.6246947
0
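The fields above pair a document_score of 0.6246947 with the 30 negative_scores, and a document_rank of 0 is consistent with the positive document outscoring every negative. A hedged sketch of how such a rank could be recomputed from the row (this interpretation of the field is an assumption, not documented in the dump):

def recompute_rank(document_score, negative_scores):
    # Rank = number of negatives scored at least as high as the positive
    # document; 0 means the positive ranks first among all 31 candidates.
    return sum(1 for s in negative_scores if s >= document_score)

# For the row above: the largest negative is 0.544197 < 0.6246947, so rank == 0.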
Performs a resumable upload if the provider supports it and the file size is at or above the threshold; otherwise performs a non-resumable upload. Returns (elapsed_time, bytes_transferred).
def PerformResumableUploadIfApplies(self, fp, dst_uri, headers, canned_acl):
  start_time = time.time()
  file_size = os.path.getsize(fp.name)
  dst_key = dst_uri.new_key(False, headers)
  (cb, num_cb, res_upload_handler) = self.GetTransferHandlers(
      dst_uri, dst_key, file_size, True)
  if dst_uri.scheme == 'gs':
    # Resumable upload protocol is Google Storage-specific.
    dst_key.set_contents_from_file(fp, headers=headers, policy=canned_acl,
                                   cb=cb, num_cb=num_cb,
                                   res_upload_handler=res_upload_handler)
  else:
    dst_key.set_contents_from_file(fp, headers=headers, policy=canned_acl,
                                   cb=cb, num_cb=num_cb)
  if res_upload_handler:
    bytes_transferred = file_size - res_upload_handler.upload_start_point
  else:
    bytes_transferred = file_size
  end_time = time.time()
  return (end_time - start_time, bytes_transferred)
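Since the method returns (elapsed_time, bytes_transferred), a caller can derive throughput directly; a minimal sketch, with illustrative variable names that are not from the original source:

elapsed_time, bytes_transferred = self.PerformResumableUploadIfApplies(
    fp, dst_uri, headers, canned_acl)
# Guard against a zero-duration transfer of a tiny or already-uploaded file.
throughput = bytes_transferred / elapsed_time if elapsed_time else 0.0
print('Transferred %d bytes at %.1f B/s' % (bytes_transferred, throughput))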
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,\r\n num_cb):\r\n (server_start, server_end) = self.SERVER_HAS_NOTHING\r\n conn = key.bucket.connection\r\n if self.tracker_uri:\r\n # Try to resume existing resumable upload.\r\n try:\r\n (server_start, server_end) = (\r\n self._query_server_pos(conn, file_length))\r\n self.server_has_bytes = server_start\r\n key=key\r\n if conn.debug >= 1:\r\n print 'Resuming transfer.'\r\n except ResumableUploadException, e:\r\n if conn.debug >= 1:\r\n print 'Unable to resume transfer (%s).' % e.message\r\n self._start_new_resumable_upload(key, headers)\r\n else:\r\n self._start_new_resumable_upload(key, headers)\r\n\r\n # upload_start_point allows the code that instantiated the\r\n # ResumableUploadHandler to find out the point from which it started\r\n # uploading (e.g., so it can correctly compute throughput).\r\n if self.upload_start_point is None:\r\n self.upload_start_point = server_end\r\n\r\n if server_end == file_length:\r\n # Boundary condition: complete file was already uploaded (e.g.,\r\n # user interrupted a previous upload attempt after the upload\r\n # completed but before the gsutil tracker file was deleted). Set\r\n # total_bytes_uploaded to server_end so we'll attempt to upload\r\n # no more bytes but will still make final HTTP request and get\r\n # back the response (which contains the etag we need to compare\r\n # at the end).\r\n total_bytes_uploaded = server_end\r\n else:\r\n total_bytes_uploaded = server_end + 1\r\n fp.seek(total_bytes_uploaded)\r\n conn = key.bucket.connection\r\n\r\n # Get a new HTTP connection (vs conn.get_http_connection(), which reuses\r\n # pool connections) because httplib requires a new HTTP connection per\r\n # transaction. (Without this, calling http_conn.getresponse() would get\r\n # \"ResponseNotReady\".)\r\n http_conn = conn.new_http_connection(self.tracker_uri_host,\r\n conn.is_secure)\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n # Make sure to close http_conn at end so if a local file read\r\n # failure occurs partway through server will terminate current upload\r\n # and can report that progress on next attempt.\r\n try:\r\n return self._upload_file_bytes(conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb)\r\n except (ResumableUploadException, socket.error):\r\n resp = self._query_server_state(conn, file_length)\r\n if resp.status == 400:\r\n raise ResumableUploadException('Got 400 response from server '\r\n 'state query after failed resumable upload attempt. 
This '\r\n 'can happen if the file size changed between upload '\r\n 'attempts', ResumableTransferDisposition.ABORT)\r\n else:\r\n raise\r\n finally:\r\n http_conn.close()", "def upload_progress(self, cloud_file, size, uploaded):", "def resumable_upload(insert_request):\r\n response = None\r\n error = None\r\n retry = 0\r\n while response is None:\r\n try:\r\n print(\"Uploading file...\")\r\n status, response = insert_request.next_chunk()\r\n if 'id' in response:\r\n print(\"Video id '%s' was successfully uploaded.\" % response['id'])\r\n f=open(\"uploadedIDs.txt\",\"a+\")\r\n f.write(\"\\n\"+response['id'])\r\n f.close()\r\n else:\r\n exit(\"The upload failed with an unexpected response: %s\" % response)\r\n except HttpError:\r\n import sys\r\n e = sys.exc_info()[1]\r\n if e.resp.status in RETRIABLE_STATUS_CODES:\r\n error = \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,\r\n e.content)\r\n else:\r\n raise\r\n except RETRIABLE_EXCEPTIONS:\r\n import sys\r\n e = sys.exc_info()[1]\r\n error = \"A retriable error occurred: %s\" % e\r\n\r\n if error is not None:\r\n print(error)\r\n retry += 1\r\n if retry > MAX_RETRIES:\r\n exit(\"No longer attempting to retry.\")\r\n\r\n max_sleep = 2 ** retry\r\n sleep_seconds = random.random() * max_sleep\r\n print(\"Sleeping %f seconds and then retrying...\" % sleep_seconds)\r\n time.sleep(sleep_seconds)", "def _UploadWithProgressInternal(self, media, gcs_file_name):\n bucket, bucket_path = self._ParseBucketAndPath(gcs_file_name)\n request = self._service.objects().insert(bucket=bucket,\n media_body=media,\n name=bucket_path)\n if media._size == 0: # pylint: disable=protected-access\n return self._RunWithRetries(request.execute, self._CommonErrorMatcher)\n\n response = None\n logged_percent_done = 0\n while response is None:\n status, response = self._RunWithRetries(request.next_chunk,\n self._CommonErrorMatcher)\n if status:\n percent_done = int(status.progress() * 100)\n if percent_done - logged_percent_done >= 5:\n logging.info('Uploading to gs://%s/%s: %d%% complete.',\n bucket,\n bucket_path,\n int(status.progress() * 100))\n logged_percent_done = percent_done\n return response", "def send_file(self, key, fp, headers, cb=None, num_cb=10):\r\n\r\n if not headers:\r\n headers = {}\r\n\r\n fp.seek(0, os.SEEK_END)\r\n file_length = fp.tell()\r\n fp.seek(0)\r\n debug = key.bucket.connection.debug\r\n\r\n # Use num-retries from constructor if one was provided; else check\r\n # for a value specified in the boto config file; else default to 5.\r\n if self.num_retries is None:\r\n self.num_retries = config.getint('Boto', 'num_retries', 5)\r\n progress_less_iterations = 0\r\n\r\n while True: # Retry as long as we're making progress.\r\n server_had_bytes_before_attempt = self.server_has_bytes\r\n try:\r\n etag = self._attempt_resumable_upload(key, fp, file_length,\r\n headers, cb, num_cb)\r\n # Upload succceded, so remove the tracker file (if have one).\r\n self._remove_tracker_file()\r\n self._check_final_md5(key, etag)\r\n if debug >= 1:\r\n print 'Resumable upload complete.'\r\n return\r\n except self.RETRYABLE_EXCEPTIONS, e:\r\n if debug >= 1:\r\n print('Caught exception (%s)' % e.__repr__())\r\n if isinstance(e, IOError) and e.errno == errno.EPIPE:\r\n # Broken pipe error causes httplib to immediately\r\n # close the socket (http://bugs.python.org/issue5542),\r\n # so we need to close the connection before we resume\r\n # the upload (which will cause a new connection to be\r\n # opened the next time an HTTP request is sent).\r\n 
key.bucket.connection.connection.close()\r\n except ResumableUploadException, e:\r\n if (e.disposition ==\r\n ResumableTransferDisposition.ABORT_CUR_PROCESS):\r\n if debug >= 1:\r\n print('Caught non-retryable ResumableUploadException '\r\n '(%s); aborting but retaining tracker file' %\r\n e.message)\r\n raise\r\n elif (e.disposition ==\r\n ResumableTransferDisposition.ABORT):\r\n if debug >= 1:\r\n print('Caught non-retryable ResumableUploadException '\r\n '(%s); aborting and removing tracker file' %\r\n e.message)\r\n self._remove_tracker_file()\r\n raise\r\n else:\r\n if debug >= 1:\r\n print('Caught ResumableUploadException (%s) - will '\r\n 'retry' % e.message)\r\n\r\n # At this point we had a re-tryable failure; see if made progress.\r\n if self.server_has_bytes > server_had_bytes_before_attempt:\r\n progress_less_iterations = 0\r\n else:\r\n progress_less_iterations += 1\r\n\r\n if progress_less_iterations > self.num_retries:\r\n # Don't retry any longer in the current process.\r\n raise ResumableUploadException(\r\n 'Too many resumable upload attempts failed without '\r\n 'progress. You might try this upload again later',\r\n ResumableTransferDisposition.ABORT_CUR_PROCESS)\r\n\r\n # Use binary exponential backoff to desynchronize client requests\r\n sleep_time_secs = random.random() * (2**progress_less_iterations)\r\n if debug >= 1:\r\n print ('Got retryable failure (%d progress-less in a row).\\n'\r\n 'Sleeping %3.1f seconds before re-trying' %\r\n (progress_less_iterations, sleep_time_secs))\r\n time.sleep(sleep_time_secs)", "def _upload_file_bytes(self, conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb):\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = 0\r\n cb(total_bytes_uploaded, file_length)\r\n\r\n # Build resumable upload headers for the transfer. 
Don't send a\r\n # Content-Range header if the file is 0 bytes long, because the\r\n # resumable upload protocol uses an *inclusive* end-range (so, sending\r\n # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).\r\n put_headers = {}\r\n if file_length:\r\n range_header = self._build_content_range_header(\r\n '%d-%d' % (total_bytes_uploaded, file_length - 1),\r\n file_length)\r\n put_headers['Content-Range'] = range_header\r\n # Set Content-Length to the total bytes we'll send with this PUT.\r\n put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)\r\n http_request = AWSAuthConnection.build_base_http_request(\r\n conn, 'PUT', path=self.tracker_uri_path, auth_path=None,\r\n headers=put_headers, host=self.tracker_uri_host)\r\n http_conn.putrequest('PUT', http_request.path)\r\n for k in put_headers:\r\n http_conn.putheader(k, put_headers[k])\r\n http_conn.endheaders()\r\n\r\n # Turn off debug on http connection so upload content isn't included\r\n # in debug stream.\r\n http_conn.set_debuglevel(0)\r\n while buf:\r\n http_conn.send(buf)\r\n total_bytes_uploaded += len(buf)\r\n if cb:\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes_uploaded, file_length)\r\n i = 0\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n cb(total_bytes_uploaded, file_length)\r\n if total_bytes_uploaded != file_length:\r\n # Abort (and delete the tracker file) so if the user retries\r\n # they'll start a new resumable upload rather than potentially\r\n # attempting to pick back up later where we left off.\r\n raise ResumableUploadException(\r\n 'File changed during upload: EOF at %d bytes of %d byte file.' %\r\n (total_bytes_uploaded, file_length),\r\n ResumableTransferDisposition.ABORT)\r\n resp = http_conn.getresponse()\r\n body = resp.read()\r\n # Restore http connection debug level.\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n if resp.status == 200:\r\n return resp.getheader('etag') # Success\r\n # Retry timeout (408) and status 500 and 503 errors after a delay.\r\n elif resp.status in [408, 500, 503]:\r\n disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY\r\n else:\r\n # Catch all for any other error codes.\r\n disposition = ResumableTransferDisposition.ABORT\r\n raise ResumableUploadException('Got response code %d while attempting '\r\n 'upload (%s)' %\r\n (resp.status, resp.reason), disposition)", "def _start_new_resumable_upload(self, key, headers=None):\r\n conn = key.bucket.connection\r\n if conn.debug >= 1:\r\n print 'Starting new resumable upload.'\r\n self.server_has_bytes = 0\r\n\r\n # Start a new resumable upload by sending a POST request with an\r\n # empty body and the \"X-Goog-Resumable: start\" header. 
Include any\r\n # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length\r\n # (and raise an exception if they tried to pass one, since it's\r\n # a semantic error to specify it at this point, and if we were to\r\n # include one now it would cause the server to expect that many\r\n # bytes; the POST doesn't include the actual file bytes We set\r\n # the Content-Length in the subsequent PUT, based on the uploaded\r\n # file size.\r\n post_headers = {}\r\n for k in headers:\r\n if k.lower() == 'content-length':\r\n raise ResumableUploadException(\r\n 'Attempt to specify Content-Length header (disallowed)',\r\n ResumableTransferDisposition.ABORT)\r\n post_headers[k] = headers[k]\r\n post_headers[conn.provider.resumable_upload_header] = 'start'\r\n\r\n resp = conn.make_request(\r\n 'POST', key.bucket.name, key.name, post_headers)\r\n # Get tracker URI from response 'Location' header.\r\n body = resp.read()\r\n\r\n # Check for various status conditions.\r\n if resp.status in [500, 503]:\r\n # Retry status 500 and 503 errors after a delay.\r\n raise ResumableUploadException(\r\n 'Got status %d from attempt to start resumable upload. '\r\n 'Will wait/retry' % resp.status,\r\n ResumableTransferDisposition.WAIT_BEFORE_RETRY)\r\n elif resp.status != 200 and resp.status != 201:\r\n raise ResumableUploadException(\r\n 'Got status %d from attempt to start resumable upload. '\r\n 'Aborting' % resp.status,\r\n ResumableTransferDisposition.ABORT)\r\n\r\n # Else we got 200 or 201 response code, indicating the resumable\r\n # upload was created.\r\n tracker_uri = resp.getheader('Location')\r\n if not tracker_uri:\r\n raise ResumableUploadException(\r\n 'No resumable tracker URI found in resumable initiation '\r\n 'POST response (%s)' % body,\r\n ResumableTransferDisposition.WAIT_BEFORE_RETRY)\r\n self._set_tracker_uri(tracker_uri)\r\n self._save_tracker_uri_to_file()", "def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,\n validate=True, log_to=None, metadata=None, tags=None):\n if threads is None:\n threads = 1\n\n filenames = []\n file_sizes = []\n for file_path in files:\n normalized_filename, file_size = _file_stats(file_path, validate=validate)\n filenames.append(normalized_filename)\n file_sizes.append(file_size)\n\n # set up the logging\n bar_length = 20\n if log_to is not None:\n log_to.write('Uploading: Preparing upload(s)... 
')\n log_to.flush()\n\n overall_size = sum(file_sizes)\n validated_sizes = {filename: 0 for filename in filenames}\n transferred_sizes = {filename: 0 for filename in filenames}\n\n # TODO: we should use click.progressbar?\n def progress_bar_display(file_id, bytes_transferred, validation=False):\n validation_in_progress = sum(validated_sizes.values()) != overall_size\n if validation and validation_in_progress:\n # Validating mode\n prev_progress = sum(validated_sizes.values()) / overall_size\n validated_sizes[file_id] = bytes_transferred\n progress = sum(validated_sizes.values()) / overall_size\n else:\n # Uploading mode\n prev_progress = sum(transferred_sizes.values()) / overall_size\n transferred_sizes[file_id] = bytes_transferred\n progress = sum(transferred_sizes.values()) / overall_size\n\n if floor(100 * prev_progress) == floor(100 * progress):\n return\n\n block = int(round(bar_length * progress))\n bar = '#' * block + '-' * (bar_length - block)\n if validation and validation_in_progress:\n log_to.write('\\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))\n elif progress != 1:\n log_to.write('\\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))\n else:\n log_to.write('\\rUploading: Finalizing upload... ')\n log_to.flush()\n\n progress_bar = None if log_to is None else progress_bar_display\n\n # first, upload all the smaller files in parallel (if multiple threads are requested)\n uploading_uuids = []\n if threads > 1:\n import ctypes\n thread_error = Value(ctypes.c_wchar_p, '')\n semaphore = BoundedSemaphore(threads)\n upload_threads = []\n\n def threaded_upload(*args):\n def _wrapped(*wrapped_args):\n semaphore.acquire()\n try:\n file_uuid = upload_file(*wrapped_args[:-1])\n if file_uuid:\n uploading_uuids.append(file_uuid)\n except Exception as e:\n # handle inside the thread to prevent the exception message from leaking out\n wrapped_args[-1].value = '{}'.format(e)\n raise SystemExit\n semaphore.release()\n\n # the thread error message must be the last parameter\n thread = Thread(target=_wrapped, args=args + (thread_error, ))\n thread.daemon = True\n thread.start()\n upload_threads.append(thread)\n else:\n threaded_upload = upload_file\n\n upload_threads = []\n uploading_files = []\n for file_path, filename, file_size in zip(files, filenames, file_sizes):\n if file_size < MULTIPART_SIZE:\n file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)\n file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,\n metadata, tags)\n if file_uuid:\n uploading_uuids.append(file_uuid)\n uploading_files.append(file_obj)\n\n if threads > 1:\n # we need to do this funky wait loop to ensure threads get killed by ctrl-c\n while True:\n for thread in upload_threads:\n # hopefully no one has a <5Gb file that takes longer than a week to upload\n thread.join(604800)\n if all(not thread.is_alive() for thread in upload_threads):\n break\n if thread_error.value != '':\n raise UploadException(thread_error.value)\n\n # lastly, upload all the very big files sequentially\n for file_path, filename, file_size in zip(files, filenames, file_sizes):\n if file_size >= MULTIPART_SIZE:\n file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)\n upload_large_file(file_obj, filename, session, samples_resource, server_url,\n threads=threads, log_to=log_to)\n file_obj.close()\n\n if log_to is not None:\n log_to.write('\\rUploading: All complete.' 
+ (bar_length - 3) * ' ' + '\\n')\n log_to.flush()\n\n return uploading_uuids", "def check_upload(box_name, upload_threshold):\n\n try:\n headers = {'Cache-Control': 'no-cache'}\n response = requests.get(API_URL.format(box_name), headers=headers)\n response.raise_for_status()\n except Exception as e:\n print('{} failed getting health from backend: {}'.format(box_name, e))\n return UNKNOWN\n\n try:\n health = response.json()\n except Exception as e:\n print('{} failed decoding health response: {}'.format(box_name, e))\n return UNKNOWN\n\n if 'last_upload' not in health:\n print('{} last_upload not found in health results'.format(box_name))\n return UNKNOWN\n \n last_upload = datetime.now(tzutc()) - parser.parse(health['last_upload'])\n\n if last_upload.total_seconds() > upload_threshold:\n print('{} last uploaded {}'.format(box_name, naturaltime(last_upload)))\n return CRITICAL\n\n print('{} last upload {}'.format(box_name, naturaltime(last_upload)))\n return OK", "def upload(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if isinstance(remote, RemoteFile) and self.hash() == remote.hash:\n # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.path, remote.uri))\n return\n if not overwrite:\n raise ValueError(\"%s exists\" % remote.uri)\n\n # Uploading can either happen all at once (with a 150 MB limit),\n # or in chunks. If the file is smaller than the selected chunk size,\n # then try to upload in one go.\n chunksize = min(pdbox._args.get(\"chunksize\", 149.0), 149.0)\n pdbox.debug(\"Chunk size: %.2f MB\" % chunksize)\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return None\n\n # Set the write mode.\n if overwrite:\n mode = dropbox.files.WriteMode.overwrite\n else:\n mode = dropbox.files.WriteMode.add\n\n chunk = int(chunksize * 1024 * 1024) # Convert B to MB.\n\n with open(self.path, \"rb\") as f:\n data = f.read()\n sz = len(data)\n\n # TODO: Progress bars.\n if sz < chunk: # One-shot upload.\n meta = execute(pdbox.dbx.files_upload, data, dest, mode)\n else: # Multipart upload.\n nchunks = math.ceil(sz / chunk)\n # Initiate the upload with just the first byte.\n start = execute(pdbox.dbx.files_upload_session_start, f[0])\n cursor = dropbox.files.UploadSessionCursor(start.session_id, 1)\n\n # Now just add each chunk.\n while sz - cursor.offset > chunk:\n pdbox.debug(\n \"Uploading chunk %d/%d\" % (cursor.offset % chunk, nchunks),\n )\n execute(\n pdbox.dbx.files_upload_session_append_v2,\n data[cursor.offset:cursor.offset + chunk],\n cursor,\n )\n cursor.offset += chunk\n\n # Upload the remaining to finish the transaction.\n meta = execute(\n pdbox.dbx.files_upload_session_finish,\n data[cursor.offset:],\n dropbox.files.CommitInfo(dest, mode),\n )\n\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return RemoteFile(None, meta=meta)", "def UploadFile(self, local_file_name, gcs_file_name,\n mimetype='application/octet-stream'):\n resumable = os.stat(local_file_name).st_size > 0\n media = gapi_http.MediaFileUpload(local_file_name,\n mimetype=mimetype,\n resumable=resumable)\n\n # gsutil's code suggests that 404s and 410s are retryable for resumable\n # uploads (see ResumableUploadStartOverException).\n def _ErrorMatcher(error):\n return (self._CommonErrorMatcher(error)\n or (isinstance(error, gapi_errors.HttpError)\n and error.resp.status in (404, 
410)))\n\n return self._RunWithRetries(\n lambda: self._UploadWithProgress(media, gcs_file_name),\n _ErrorMatcher)", "def file_transfer(\n filename: str,\n filesize: int,\n packetsize: int = 1000,\n timeout: int = 2000,\n mode: Optional[str] = None,\n) -> None:", "def __call__(self, name, post_check_hook=None):\n def do_upload():\n video_data, filetype = self.read_file(name)\n\n ticket_id, upload_uri, complete_uri = self.get_upload_ticket()\n log.info(\"Ticket ID: %s\" % ticket_id)\n\n _range = 0\n hook_break = False\n while _range < len(video_data) and hook_break != True:\n self.upload_segment(upload_uri, _range, video_data, filetype or 'mp4')\n _range = self.get_last_uploaded_byte(upload_uri)\n # hook is passed the range, breaks retry cycle if it returns True\n if post_check_hook:\n hook_break = post_check_hook(_range)\n\n log.info(\"Upload completed\")\n return self.delete_upload_ticket(complete_uri)\n\n return do_upload()", "def upload_file(self, file_upload_parameters, progress=None):\n\n file_upload_parameters._submit_upload_parameters.timeout_in_milliseconds = file_upload_parameters.timeout_in_milliseconds\n operation = self.submit_upload(file_upload_parameters._submit_upload_parameters)\n return self.download_upload_result(operation, file_upload_parameters, progress)", "def upload_with_nginx_upload_progress(request):\n input_file, file_size, filename = get_file_from_request(request)\n upload_dir = request.registry.settings['poulda.upload_dir']\n path = os.path.join(upload_dir, filename)\n with open(path, 'w') as output:\n # We must read only 'file_size' bytes from the 'input_file',\n # not all of it since it also contains the MIME boundary.\n copy_to_file(input_file, file_size, output)\n return HTTPFound(location='success')", "def upload_complete(self, path, url, quiet):\r\n file_size = os.path.getsize(path)\r\n try:\r\n with tqdm(total=file_size,\r\n unit='B',\r\n unit_scale=True,\r\n unit_divisor=1024,\r\n disable=quiet) as progress_bar:\r\n with io.open(path, 'rb', buffering=0) as fp:\r\n reader = TqdmBufferedReader(fp, progress_bar)\r\n session = requests.Session()\r\n retries = Retry(total=10, backoff_factor=0.5)\r\n adapter = HTTPAdapter(max_retries=retries)\r\n session.mount('http://', adapter)\r\n session.mount('https://', adapter)\r\n response = session.put(url, data=reader)\r\n except Exception as error:\r\n print(error)\r\n return False\r\n return response.status_code == 200 or response.status_code == 201", "def reaper(self):\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n 
file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))", "def upload(request):\n if request.method != \"POST\":\n return probe(request)\n\n md5chunk = request.args.get('md5chunk', False)\n md5total = request.args.get('md5total', False)\n\n chunk = int(request.args.get('chunk', 0))\n chunks = int(request.args.get('chunks', 0))\n\n if md5chunk and md5total:\n filename = upload_with_checksum(request, md5chunk, md5total, chunk, chunks)\n else:\n filename = upload_simple(request, chunk)\n\n return Response('%s uploaded' % filename)", "def upload_file(self, file, max_chunk_size=1024 * 1024, parallelism=10):\n\n upload_key = self.internal.media_and_files.GetFileUploadUrl(\n media_and_files_pb2.RequestGetFileUploadUrl(\n expected_size=os.path.getsize(file)\n )\n ).upload_key\n\n with ThreadPoolExecutor(max_workers=parallelism) as executor:\n result = list(\n executor.map(\n lambda x: self.upload_file_chunk(*x),\n (\n (part_number, chunk, upload_key) for part_number, chunk in enumerate(\n read_file_in_chunks(file, max_chunk_size)\n )\n )\n\n )\n )\n\n if not all(result):\n return None\n\n return self.internal.media_and_files.CommitFileUpload(\n media_and_files_pb2.RequestCommitFileUpload(\n upload_key=upload_key,\n file_name=os.path.basename(file)\n )\n ).uploaded_file_location", "async def tus_upload_part(request: web.Request) -> web.Response:\n ctx: Context = request.app[\"ctx\"]\n secret = ctx.local_config[\"storage-proxy\"][\"secret\"]\n async with check_params(\n request,\n t.Dict(\n {\n t.Key(\"token\"): tx.JsonWebToken(\n secret=secret, inner_iv=upload_token_data_iv\n ),\n }\n ),\n read_from=CheckParamSource.QUERY,\n ) as params:\n token_data = params[\"token\"]\n async with ctx.get_volume(token_data[\"volume\"]) as volume:\n headers = await prepare_tus_session_headers(request, token_data, volume)\n vfpath = volume.mangle_vfpath(token_data[\"vfid\"])\n upload_temp_path = vfpath / \".upload\" / token_data[\"session\"]\n\n async with AsyncFileWriter(\n target_filename=upload_temp_path,\n access_mode=\"ab\",\n max_chunks=DEFAULT_INFLIGHT_CHUNKS,\n ) as writer:\n while not request.content.at_eof():\n chunk = await request.content.read(DEFAULT_CHUNK_SIZE)\n await writer.write(chunk)\n\n current_size = Path(upload_temp_path).stat().st_size\n if current_size >= int(token_data[\"size\"]):\n target_path = vfpath / token_data[\"relpath\"]\n upload_temp_path.rename(target_path)\n try:\n loop = asyncio.get_running_loop()\n await loop.run_in_executor(\n None, lambda: upload_temp_path.parent.rmdir()\n )\n except OSError:\n pass\n headers[\"Upload-Offset\"] = str(current_size)\n return web.Response(status=204, headers=headers)", "def _direct_upload(file_obj, file_name, fields, session, samples_resource):\n\n # need an OrderedDict to preserve field order for S3, required for Python 2.7\n multipart_fields = OrderedDict()\n\n for k, v in fields[\"additional_fields\"].items():\n multipart_fields[str(k)] = str(v)\n\n # this attribute is only in FASTXInterleave and FilePassthru\n mime_type = getattr(file_obj, \"mime_type\", \"text/plain\")\n multipart_fields[\"file\"] = (file_name, file_obj, mime_type)\n encoder = MultipartEncoder(multipart_fields)\n upload_request = None\n\n try:\n upload_request = 
session.post(\n fields[\"upload_url\"],\n data=encoder,\n headers={\"Content-Type\": encoder.content_type},\n auth={},\n )\n except requests.exceptions.ConnectionError:\n pass\n\n # If we expect a status *always* try to check it,\n # waiting up to 4 hours for buffering to complete (~30-50GB file gzipped)\n if \"status_url\" in fields[\"additional_fields\"]:\n now = time.time()\n while time.time() < (now + 60 * 60 * 4):\n try:\n resp = session.post(\n fields[\"additional_fields\"][\"status_url\"],\n json={\"sample_id\": fields[\"sample_id\"]},\n )\n resp.raise_for_status()\n except (ValueError, requests.exceptions.RequestException) as e:\n log.debug(\"Retrying due to error: {}\".format(e))\n raise RetryableUploadException(\n \"Unexpected failure of direct upload proxy. Retrying...\"\n )\n\n if resp.json() and resp.json().get(\"complete\", True) is False:\n log.debug(\"Blocking on waiting for proxy to complete (in progress)...\")\n time.sleep(30)\n else:\n break\n\n # Return is successfully processed\n if resp.json().get(\"code\") in [200, 201]:\n file_obj.close()\n return\n elif resp.json().get(\"code\") == 500:\n log.debug(\"Retrying due to 500 from proxy...\")\n raise RetryableUploadException(\"Unexpected issue with direct upload proxy. Retrying...\")\n else:\n raise_api_error(resp, state=\"upload\")\n\n # Direct to S3 case\n else:\n file_obj.close()\n if upload_request.status_code not in [200, 201]:\n raise UploadException(\"Unknown connectivity issue with direct upload.\")\n\n # Issue a callback -- this only happens in the direct-to-S3 case\n try:\n if not fields[\"additional_fields\"].get(\"callback_url\"):\n samples_resource.confirm_upload(\n {\"sample_id\": fields[\"sample_id\"], \"upload_type\": \"standard\"}\n )\n except requests.exceptions.HTTPError as e:\n raise_api_error(e.response, state=\"callback\")\n except requests.exceptions.ConnectionError:\n raise_connectivity_error(file_name)", "def _put(self, source_path, remote_filename):\n\n quota = self.http_client.get(self.metadata_url + 'account/quota')\n quota.raise_for_status()\n available = quota.json()['available']\n\n source_size = os.path.getsize(source_path.name)\n\n if source_size > available:\n raise BackendException(\n 'Out of space: trying to store \"%s\" (%d bytes), but only '\n '%d bytes available on Amazon Drive.' % (\n source_path.name, source_size, available))\n\n # Just check the cached list, to avoid _list for every new file being\n # uploaded\n if remote_filename in self.names_to_ids:\n log.Debug('File %s seems to already exist on Amazon Drive. Deleting '\n 'before attempting to upload it again.' % remote_filename)\n self._delete(remote_filename)\n\n metadata = {'name': remote_filename, 'kind': 'FILE',\n 'parents': [self.backup_target_id]}\n headers = {'Content-Type': 'multipart/form-data; boundary=%s'\n % self.MULTIPART_BOUNDARY}\n data = self.multipart_stream(metadata, source_path)\n\n response = self.http_client.post(\n self.content_url + 'nodes?suppress=deduplication',\n data=data,\n headers=headers)\n\n if response.status_code == 409: # \"409 : Duplicate file exists.\"\n self.raise_for_existing_file(remote_filename)\n elif response.status_code == 201:\n log.Debug('%s uploaded successfully' % remote_filename)\n elif response.status_code == 408 or response.status_code == 504:\n log.Info('%s upload failed with timeout status code=%d. 
Speculatively '\n 'waiting for %d seconds to see if Amazon Drive finished the '\n 'upload anyway' % (remote_filename, response.status_code,\n globals.timeout))\n tries = globals.timeout / 15\n while tries >= 0:\n tries -= 1\n time.sleep(15)\n\n remote_size = self._query(remote_filename)['size']\n if source_size == remote_size:\n log.Debug('Upload turned out to be successful after all.')\n return\n elif remote_size == -1:\n log.Debug('Uploaded file is not yet there, %d tries left.'\n % (tries + 1))\n continue\n else:\n self.raise_for_existing_file(remote_filename)\n raise BackendException('%s upload failed and file did not show up '\n 'within time limit.' % remote_filename)\n else:\n log.Debug('%s upload returned an undesirable status code %s'\n % (remote_filename, response.status_code))\n response.raise_for_status()\n\n parsed = response.json()\n if 'id' not in parsed:\n raise BackendException('%s was uploaded, but returned JSON does not '\n 'contain ID of new file. Retrying.\\nJSON:\\n\\n%s'\n % (remote_filename, parsed))\n\n # XXX: The upload may be considered finished before the file shows up\n # in the file listing. As such, the following is required to avoid race\n # conditions when duplicity calls _query or _list.\n self.names_to_ids[parsed['name']] = parsed['id']", "def handle_request_upload(self, msg):\n\n\t\tdirect_response = not msg.arguments or msg.arguments[0] in ('', '/')\n\t\tresult = []\n\t\tfor file_obj in msg.options:\n\t\t\ttmpfilename, filename, name = file_obj['tmpfile'], file_obj['filename'], file_obj['name']\n\n\t\t\t# limit files to tmpdir\n\t\t\tif not os.path.realpath(tmpfilename).startswith(TEMPUPLOADDIR):\n\t\t\t\traise BadRequest('invalid file: invalid path')\n\n\t\t\t# check if file exists\n\t\t\tif not os.path.isfile(tmpfilename):\n\t\t\t\traise BadRequest('invalid file: file does not exists')\n\n\t\t\t# don't accept files bigger than umc/server/upload/max\n\t\t\tst = os.stat(tmpfilename)\n\t\t\tmax_size = int(ucr.get('umc/server/upload/max', 64)) * 1024\n\t\t\tif st.st_size > max_size:\n\t\t\t\tos.remove(tmpfilename)\n\t\t\t\traise BadRequest('filesize is too large, maximum allowed filesize is %d' % (max_size,))\n\n\t\t\tif direct_response:\n\t\t\t\twith open(tmpfilename) as buf:\n\t\t\t\t\tb64buf = base64.b64encode(buf.read())\n\t\t\t\tresult.append({'filename': filename, 'name': name, 'content': b64buf})\n\n\t\tif direct_response:\n\t\t\tself.finished(msg.id, result)\n\t\telse:\n\t\t\tself.handle_request_command(msg)", "def upload(connection, server_pub_key, priv_key, max_message_size):\r\n\r\n while True:\r\n # Get file name from user\r\n file_path = input('Which file would you like to send to the server?: ')\r\n\r\n # Verify file exists\r\n if os.path.isfile(file_path) is True:\r\n break\r\n\r\n # File doesn't exist\r\n else:\r\n print('Could not find specified file, please try again', file=sys.stderr)\r\n\r\n try:\r\n # Tell server file is being sent\r\n connection.sendall(rsa.encrypt(b'UPLOAD', server_pub_key))\r\n time.sleep(1)\r\n connection.sendall(rsa.encrypt(str.encode(file_path), server_pub_key))\r\n time.sleep(1)\r\n\r\n # Tell the server the file size of the file attempting to be uploaded\r\n connection.sendall(rsa.encrypt(str(os.path.getsize(file_path)).encode(), server_pub_key))\r\n time.sleep(1)\r\n\r\n # Get requirement from server\r\n data = rsa.decrypt(connection.recv(1024), priv_key)\r\n if data == b'PERMISSION CHECK':\r\n security_level = input('What security level should the file have?: ')\r\n 
connection.sendall(rsa.encrypt(security_level.encode(), server_pub_key))\r\n\r\n elif data == b'TRAVERSAL':\r\n print('Failed attempting to upload file outside of scope', file=sys.stderr)\r\n return\r\n\r\n elif data == b'SIZE EXCEEDED':\r\n print('Maximum storage exceeded', file=sys.stderr)\r\n return\r\n\r\n else:\r\n print('Unexpected response from server', file=sys.stderr)\r\n return\r\n\r\n # Attempt to upload file to the server\r\n status = rsa.decrypt(connection.recv(1024), priv_key)\r\n if status == b'CONTINUE':\r\n\r\n # Send the file to the server\r\n shared.send_file(connection, server_pub_key, file_path, max_message_size)\r\n\r\n # Get the result from the server\r\n result = rsa.decrypt(connection.recv(1024), priv_key)\r\n\r\n # If success\r\n if result == b'SUCCESS':\r\n print('Successfully added file to the storage system')\r\n\r\n # If failure\r\n elif result == b'FAILURE':\r\n print('Failed to add file to the storage system')\r\n\r\n # Attempt to overwrite file on the server\r\n elif status == b'OVERWRITE':\r\n\r\n # Ask the user if they would like to overwrite the file on the server\r\n while True:\r\n overwrite = input('Would you like to overwrite the file on the server with the same name?\\n'\r\n '1 - Yes\\n'\r\n '2 - No\\n'\r\n 'Choice: ')\r\n\r\n # Overwrite file\r\n if overwrite == '1':\r\n connection.sendall(rsa.encrypt(b'YES', server_pub_key))\r\n break\r\n\r\n # Don't overwrite file\r\n elif overwrite == '2':\r\n connection.sendall((rsa.encrypt(b'NO', server_pub_key)))\r\n return\r\n\r\n # Invalid input\r\n else:\r\n print('Invalid input, please select an available option', file=sys.stderr)\r\n\r\n # Send the file to the server\r\n shared.send_file(connection, server_pub_key, file_path, max_message_size)\r\n\r\n # Get the result from the server\r\n result = rsa.decrypt(connection.recv(1024), priv_key)\r\n\r\n # If success\r\n if result == b'SUCCESS':\r\n print('Successfully added file to the storage system')\r\n\r\n # If failure\r\n elif result == b'FAILURE':\r\n print('Failed to add file to the storage system')\r\n\r\n else:\r\n print('Failed to upload file to the server with desired security level', file=sys.stderr)\r\n\r\n # Catch file not found\r\n except FileNotFoundError:\r\n print(''.join(['\\nCould not find the file ', file_path]), file=sys.stderr)\r\n connection.sendall(rsa.encrypt(b'MISSING', server_pub_key))", "def upload(self, requests, file):\n # Set the source and dest paths\n dest_url = self.base_url + '/upload'\n source_path = os.path.join(self.data_dir, file)\n\n # Get the sha256 hash of the file\n with open(source_path, 'rb') as afile:\n beforeDigest = hashlib.sha256(afile.read()).hexdigest()\n\n print(\"Generated a hash of the temp file: \" + beforeDigest)\n\n # Upload the file and time it\n with open(source_path, 'rb') as f:\n startTime = time.time()\n r = requests.post(dest_url, files={'file': (file, f)}, max_price=5)\n endTime = time.time()\n uploadElapsedTime = endTime - startTime\n\n print(\"Uploaded the file. 
Elapsed time: \" + str(uploadElapsedTime))\n\n # Verify the upload was successful\n if r.json()['success'] is not True:\n return {'success': False}\n\n retVal = {\n 'success': True,\n 'time': uploadElapsedTime,\n 'digest': beforeDigest,\n 'upload_filename': r.json()['filename']\n }\n\n return retVal", "def upload_large_file(file_obj, filename, session, samples_resource, server_url, threads=10,\n log_to=None):\n import boto3\n from boto3.s3.transfer import TransferConfig\n from boto3.exceptions import S3UploadFailedError\n\n # first check with the one codex server to get upload parameters\n try:\n upload_params = samples_resource.init_multipart_upload()\n except requests.exceptions.HTTPError:\n raise UploadException('Could not initiate upload with One Codex server')\n\n callback_url = server_url.rstrip('/') + upload_params['callback_url']\n access_key = upload_params['upload_aws_access_key_id']\n secret_key = upload_params['upload_aws_secret_access_key']\n\n # actually do the upload\n client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)\n # TODO: this automatically uses 10 threads, but we'd probably like it to be configurable\n config = TransferConfig(max_concurrency=threads)\n try:\n client.upload_fileobj(file_obj, upload_params['s3_bucket'], upload_params['file_id'],\n ExtraArgs={'ServerSideEncryption': 'AES256'}, Config=config)\n except S3UploadFailedError:\n raise UploadException(\"Upload of %s has failed. Please contact [email protected] \"\n \"if you experience further issues\" % filename)\n\n # return completed status to the one codex server\n s3_path = 's3://{}/{}'.format(upload_params['s3_bucket'], upload_params['file_id'])\n req = session.post(callback_url, json={'s3_path': s3_path, 'filename': filename})\n\n if req.status_code != 200:\n raise UploadException(\"Upload confirmation of %s has failed. Please contact \"\n \"[email protected] if you experience further issues\" % filename)\n if log_to is not None:\n log_to.write('\\rUploading: {} finished.\\n'.format(filename))\n log_to.flush()", "def upload(self, fullfilename, remotefolder=None):\n print(\"[Remote Server] Uploading %s to %s:%s\" %(fullfilename, self.server, self.remotefolder))\n\n if not self.status:\n return 0\n\n if remotefolder == None:\n remotefolder = self.remotefolder\n\n if not self.cd(remotefolder):\n return 0\n\n if not self.sendFile(fullfilename):\n print(\"[Remote Server] Error uploading file %s\" %fullfilename)\n return 0\n\n print(\"[Remote Server] upload finished successfully\")\n\n return 1", "def upload(conn, localpath, remotepath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if os.path.isdir(localpath):\n upload_dir(conn, localpath, remotepath, filter, chunk_size)\n elif os.path.isfile(localpath):\n upload_file(conn, localpath, remotepath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot upload %r\" % (localpath,))", "def upload_file(url, filename, metadata={}):\n while True:\n files = {'dataFile': open(filename, 'rb')}\n r = requests.post(url, files=files, data=metadata)\n\n # Check if everything went good\n if r.status_code is 200:\n res = r.json()\n\n if res['success'] and md5_checksum(filename) == res['md5']:\n print('[ OK ] File uploaded successfully!')\n return True\n else:\n print('[ WARNING ] Something went werong. Trying again...')\n time.sleep(10)\n else:\n print('[ ERROR ] Connection error. 
Status code: {}'.format(r.status_code))", "def upload(self):\n while not self._upload_queue.empty():\n logger.info('%d files left to upload', self._upload_queue.qsize())\n self._sia_condition_waiter.wait_for_available_upload_slot()\n job = self._upload_queue.get()\n if (not self._process_upload_job_async(job)) and (job.failure_count\n < 3):\n self._upload_queue.put(job)\n self._sia_condition_waiter.wait_for_all_uploads_to_complete()\n self._exit_event.set()" ]
[ "0.64338714", "0.59548914", "0.57437813", "0.5662042", "0.558884", "0.55833423", "0.55503416", "0.5455731", "0.53347987", "0.52953595", "0.52573204", "0.5241666", "0.5155896", "0.5148159", "0.5137368", "0.5097519", "0.5084955", "0.50364286", "0.50277656", "0.5009872", "0.50070083", "0.49945274", "0.49784875", "0.49671853", "0.4961588", "0.49455816", "0.49313354", "0.49299148", "0.49275425", "0.49203086" ]
0.6702616
0
Expands URI wildcarding, objectless bucket names, and directory names.
def ExpandWildcardsAndContainers(self, uri_strs, sub_opts=None, headers=None, debug=0): # The algorithm we use is: # 1. Build a first level expanded list from uri_strs consisting of all # URIs that aren't file wildcards, plus expansions of the file wildcards. # 2. Build dict from above expanded list. # We do so that we can properly handle the following example: # gsutil cp file0 dir0 gs://bucket # where dir0 contains file1 and dir1/file2. # If we didn't do the first expansion, this cp command would end up # with this expansion: # {file://file0:[file://file0],file://dir0:[file://dir0/file1, # file://dir0/dir1/file2]} # instead of the (correct) expansion: # {file://file0:[file://file0],file://dir0/file1:[file://dir0/file1], # file://dir0/dir1:[file://dir0/dir1/file2]} # The latter expansion is needed so that in the "Copying..." loop of # CopyObjsCommand we know that dir0 was being copied, so we create an # object called gs://bucket/dir0/dir1/file2. (Otherwise it would look # like a single file was being copied, so we'd create an object called # gs://bucket/file2.) should_recurse = False if sub_opts: for o, unused_a in sub_opts: if o == '-r' or o == '-R': should_recurse = True # Step 1. uris_to_expand = [] for uri_str in uri_strs: uri = self.StorageUri(uri_str, debug=debug, validate=False) if uri.is_file_uri() and ContainsWildcard(uri_str): uris_to_expand.extend(list( self.CmdWildcardIterator(uri, headers=headers, debug=debug))) else: uris_to_expand.append(uri) # Step 2. result = {} for uri in uris_to_expand: if uri.names_container(): if not should_recurse: if uri.is_file_uri(): desc = 'directory' else: desc = 'bucket' print 'Omitting %s "%s".' % (desc, uri.uri) result[uri] = [] continue if uri.is_file_uri(): # dir -> convert to implicit recursive wildcard. uri_to_iter = '%s/**' % uri.uri else: # bucket -> convert to implicit wildcard. uri_to_iter = uri.clone_replace_name('*') else: uri_to_iter = uri result[uri] = list(self.CmdWildcardIterator( uri_to_iter, headers=headers, debug=debug)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expand(obj):\n return (obj + '*', obj + '*:*', obj + '*:*:*')", "def CmdWildcardIterator(self, uri_or_str, result_type=ResultType.URIS,\n headers=None, debug=0):\n return wildcard_iterator.wildcard_iterator(\n uri_or_str, result_type=result_type, headers=headers, debug=debug,\n bucket_storage_uri_class=self.bucket_storage_uri_class)", "def InsistUriNamesContainer(self, command, uri):\n if uri.names_singleton():\n raise CommandException('Destination StorageUri must name a bucket or '\n 'directory for the\\nmultiple source form of the '\n '\"%s\" command.' % command)", "def HandleMultiSrcCopyRequst(self, src_uri_expansion, dst_uri):\n # If src_uri and dst_uri both name containers, handle\n # two cases to make copy command work like UNIX \"cp -r\" works:\n # a) if dst_uri names a non-existent directory, copy objects to a new\n # directory with the dst_uri name. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/a.\n # b) if dst_uri names an existing directory, copy objects under that\n # directory. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/bucket/a.\n src_uri_to_check = src_uri_expansion.keys()[0]\n if (src_uri_to_check.names_container() and dst_uri.names_container() and\n os.path.exists(dst_uri.object_name)):\n new_name = ('%s%s%s' % (dst_uri.object_name, os.sep,\n src_uri_to_check.bucket_name)).rstrip('/')\n dst_uri = dst_uri.clone_replace_name(new_name)\n # Create dest directory if needed.\n if dst_uri.is_file_uri() and not os.path.exists(dst_uri.object_name):\n os.makedirs(dst_uri.object_name)\n return dst_uri", "def __extend_uri(prefixes, short):\n for prefix in prefixes:\n if short.startswith(prefix):\n return short.replace(prefix + ':', prefixes[prefix])\n return short", "def _object_path(self, name: str) -> str:\n prefix = name[:2]\n suffix = name[2:]\n return posixpath.join(self._path, \"objects\", prefix, suffix)", "def rename_keys_on_s3(bucket_name, bucket_region, prefix_root, prefix_modification_func,\n filter_keys_func=None):\n if prefix_modification_func is None:\n raise Exception('There should be a modification function specified')\n\n boto3_client = boto3.client('s3', bucket_region)\n\n paginator = boto3_client.get_paginator('list_objects')\n pageresponse = paginator.paginate(Bucket=bucket_name, Prefix=prefix_root)\n for page_of_keys in pageresponse:\n for current_key in page_of_keys['Contents']:\n current_prefix = current_key['Key']\n if filter_keys_func is None or filter_keys_func(current_prefix):\n logger.info('Skipped prefix {}'.format(current_prefix))\n continue\n try:\n new_prefix_name = prefix_modification_func(current_prefix)\n rename_s3_key(boto3_client, bucket_name, current_prefix, new_prefix_name)\n except (ClientError, AttributeError) as e:\n logger.error('Unable to rename key prefix {}, {}'.format(current_prefix, e[0]))", "def clone_replace_name(self, new_name):\r\n if not self.bucket_name:\r\n raise InvalidUriError('clone_replace_name() on bucket-less URI %s' %\r\n self.uri)\r\n return BucketStorageUri(self.scheme, self.bucket_name, new_name,\r\n self.debug)", "async def glob(self):\n # get the path string up to the wildcards\n try:\n pi1 = self._path.index(\"*\")\n except ValueError:\n pi1 = len(self._path)\n try:\n pi2 = self._path.index(\"?\")\n except ValueError:\n pi2 = len(self._path)\n pi = min(pi1, pi2)\n # using the prefix will cut down on the search space\n prefix = self._path[:pi]\n # get the wildcard\n wildcard = self._path[pi:]\n # set up the paginator\n paginator = 
self._conn_obj.conn.get_paginator(\"list_objects_v2\")\n parameters = {\n 'Bucket': self._bucket,\n 'Prefix': prefix\n }\n page_iterator = paginator.paginate(**parameters)\n files = []\n async for page in page_iterator:\n for item in page.get('Contents', []):\n fname = item['Key']\n # check that it matches against wildcard\n if fnmatch(fname, wildcard):\n files.append(item['Key'])\n return files", "def blob_generator(bucket_name, pattern):\n cloud_bucket = get_gcsbucket(bucket_name)\n for blob in cloud_bucket.objects():\n if blob.key.endswith(pattern):\n yield blob.uri", "def ListCommand(self, args, sub_opts=None, headers=None, debug=0):\n listing_style = ListingStyle.SHORT\n get_bucket_info = False\n if sub_opts:\n for o, unused_a in sub_opts:\n if o == '-b':\n get_bucket_info = True\n if o == '-l':\n listing_style = ListingStyle.LONG\n if o == '-L':\n listing_style = ListingStyle.LONG_LONG\n if not args:\n # default to listing all gs buckets\n args = ['gs://']\n\n total_objs = 0\n total_bytes = 0\n for uri_str in args:\n uri = self.StorageUri(uri_str, debug=debug, validate=False)\n\n if not uri.bucket_name:\n # Provider URI: add bucket wildcard to list buckets.\n for uri in self.CmdWildcardIterator('%s://*' % uri.scheme,\n headers=headers, debug=debug):\n (bucket_objs, bucket_bytes) = self.PrintBucketInfo(uri, listing_style,\n headers=headers,\n debug=debug)\n total_bytes += bucket_bytes\n total_objs += bucket_objs\n\n elif not uri.object_name:\n if get_bucket_info:\n # ls -b request on provider+bucket URI: List info about bucket(s).\n for uri in self.CmdWildcardIterator(uri, headers=headers,\n debug=debug):\n (bucket_objs, bucket_bytes) = self.PrintBucketInfo(uri,\n listing_style,\n headers=headers,\n debug=debug)\n total_bytes += bucket_bytes\n total_objs += bucket_objs\n else:\n # ls request on provider+bucket URI: List objects in the bucket(s).\n for obj in self.CmdWildcardIterator(uri.clone_replace_name('*'),\n ResultType.KEYS,\n headers=headers, debug=debug):\n total_bytes += self.PrintObjectInfo(uri, obj, listing_style,\n headers=headers, debug=debug)\n total_objs += 1\n\n else:\n # Provider+bucket+object URI -> list the object(s).\n for obj in self.CmdWildcardIterator(uri, ResultType.KEYS,\n headers=headers, debug=debug):\n total_bytes += self.PrintObjectInfo(uri, obj, listing_style,\n headers=headers, debug=debug)\n total_objs += 1\n if listing_style != ListingStyle.SHORT:\n print ('TOTAL: %d objects, %d bytes (%s)' %\n (total_objs, total_bytes, MakeHumanReadable(float(total_bytes))))", "def _list_objects(src: str)->list:\n if _is_s3(src):\n return aws_s3_ls(src)\n else:\n if _is_dir(src):\n return _list_dir(src)\n else:\n return [src]", "def objs_with_prefix(bucket, log_type, query_time):\n prefix = get_prefix(log_type, query_time)\n # S3 guarantees to return objects in ascending key order based on the UTF-8\n # binary representation of the key. Unfortunately the server-side filtering\n # is quite limited; we can't specify the sort order or the sort key.\n objs = list(bucket.objects.filter(Prefix=prefix))\n logging.info('Found %s files with prefix %s',\n 'no' if not objs else len(objs), prefix)\n return objs", "def RemoveBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n # Expand bucket name wildcards, if any.\n for uri_str in args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if uri.object_name:\n raise CommandException('\"rb\" command requires a URI with no object '\n 'name')\n print 'Removing %s...' 
% uri\n uri.delete_bucket(headers)", "def get_custom_short_paths(content):", "def manipulate_bucketlist():\n pass", "def list_s3(bucket, prefix, ext=None):\n s3 = boto3.resource('s3')\n s3_bucket = s3.Bucket(bucket)\n\n if ext:\n ext = '.' + ext.lstrip('.')\n else:\n ext = ''\n\n for item in s3_bucket.objects.filter(Prefix=prefix):\n key = item.key\n if not key.endswith(ext):\n continue\n\n yield key", "def canonicalPath(path_or_object):", "def pathfor( name, **matchdict ) :", "def get_matching_s3_objects(client, bucket, prefix=\"\", suffix=\"\"):\n\n kwargs = {\"Bucket\": bucket}\n if isinstance(prefix, str):\n kwargs[\"Prefix\"] = prefix\n # logging.info(\"kwargs: %s\" % kwargs)\n while True:\n resp = client.list_objects_v2(**kwargs)\n try:\n contents = resp[\"Contents\"]\n except KeyError:\n return\n for obj in contents:\n key = obj[\"Key\"]\n if key.startswith(prefix) and key.endswith(suffix):\n yield obj\n try:\n kwargs[\"ContinuationToken\"] = resp[\"NextContinuationToken\"]\n except KeyError:\n break", "def names(path, filter=None):", "def arn_for_objects(self, key_pattern: str) -> str:\n ...", "def wildcard(s, star_min=1):\n\n def _feed_parts(input_parts):\n for part in input_parts:\n if part == \"*\":\n if star_min == 0:\n yield \".*\"\n elif star_min == 1:\n yield \".+\"\n else:\n yield f\".{{{star_min},}}\"\n elif part == \"?\":\n yield \".\"\n else:\n yield re.escape(part)\n\n return \"\".join(_feed_parts(re.split(r'([\\?\\*])', s)))", "def parse_s3_uri(URIs):\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split(\"/\")\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n\n return buckets, keys", "def UriStrFor(iterated_uri, obj):\n return '%s://%s/%s' % (iterated_uri.scheme, obj.bucket.name, obj.name)", "def glob_escape(pathname):\n # Escaping is done by wrapping any of \"*?[\" between square brackets.\n # Metacharacters do not work in the drive part and shouldn't be escaped.\n drive, pathname = os.path.splitdrive(pathname)\n if isinstance(pathname, bytes):\n pathname = _magic_check_bytes.sub(br'[\\1]', pathname)\n else:\n pathname = _magic_check.sub(r'[\\1]', pathname)\n return drive + pathname", "def re_obj_sub(objects):\n\n def func(match, create_new=False, *args, **kwargs):\n \"\"\"Processing match objects. Replacing objects GUIDs.\n \"\"\"\n\n data = match.group(0)\n\n if create_new:\n dash = data.replace(\"_\", \"-\") if \"_\" in data else data\n\n if dash not in objects:\n new = gen_guid()\n objects[dash] = new\n objects[dash.replace(\"-\", \"_\")] = new.replace(\"-\", \"_\")\n\n if data in objects:\n return (objects[data], True)\n\n return (data, False)\n\n return func", "def split_gcs_uri(gcs_uri):\n m = GCS_REGEX.match(gcs_uri)\n bucket = m.group(1)\n path = \"\"\n if m.group(2):\n path = m.group(2).lstrip(\"/\")\n return bucket, path", "def list_objects(self, s3_prefix_path):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n return [\"s3://\" + bucket_name + \"/\" + key.key for key in bucket.objects.filter(Prefix=prefix)]", "def EscapeWildcards(string: Text) -> Text:\n precondition.AssertType(string, Text)\n return string.replace(\"%\", r\"\\%\").replace(\"_\", r\"\\_\")" ]
[ "0.55044866", "0.5206521", "0.5049598", "0.50417286", "0.48971736", "0.4896779", "0.4889999", "0.47994387", "0.47948807", "0.4794746", "0.47835147", "0.47599638", "0.47182953", "0.46996891", "0.46986145", "0.46973294", "0.46916848", "0.46824795", "0.46782288", "0.46630394", "0.4661961", "0.46554512", "0.46553564", "0.46383303", "0.4627949", "0.46165597", "0.4612059", "0.46012527", "0.4596684", "0.4593894" ]
0.65520847
0
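Editor's illustration (not part of the dataset row above): the positive document for the preceding query quotes gsutil's ExpandWildcardsAndContainers, which first expands file wildcards and then maps every resulting URI to the objects or files it contains, omitting containers unless -r/-R was given. A minimal local-filesystem sketch of that same two-step expansion follows; the function name expand_wildcards_and_containers and its use of glob/os.walk are assumptions for illustration only and are not gsutil APIs.

    import glob
    import os

    def expand_wildcards_and_containers(paths, recurse=False):
        # Illustrative sketch only (assumed helper, not gsutil code).
        # Step 1: expand shell-style wildcards so each match becomes its
        # own top-level entry in the result dict.
        expanded = []
        for p in paths:
            expanded.extend(glob.glob(p) if any(c in p for c in '*?[') else [p])
        # Step 2: expand containers (directories) into the files they hold,
        # keyed by the original container so callers know what was requested.
        result = {}
        for p in expanded:
            if os.path.isdir(p):
                if not recurse:
                    print('Omitting directory "%s".' % p)
                    result[p] = []
                    continue
                result[p] = [os.path.join(root, name)
                             for root, _, names in os.walk(p)
                             for name in names]
            else:
                result[p] = [p]
        return result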
Checks the copy request for problems and builds the needed base_dst_uri. base_dst_uri is the base URI to be used if this is a multi-object copy, e.g., the URI for the destination bucket. The actual dst_uri can then be constructed from the src_uri and this base_dst_uri.
def ErrorCheckCopyRequest(self, src_uri_expansion, dst_uri_str, headers, debug, command='cp'): for src_uri in src_uri_expansion: if src_uri.is_cloud_uri() and not src_uri.bucket_name: raise CommandException('Provider-only src_uri (%s)') if ContainsWildcard(dst_uri_str): matches = list(self.CmdWildcardIterator(dst_uri_str, headers=headers, debug=debug)) if len(matches) > 1: raise CommandException('Destination (%s) matches more than 1 URI' % dst_uri_str) base_dst_uri = matches[0] else: base_dst_uri = self.StorageUri(dst_uri_str, debug=debug) # Make sure entire expansion didn't result in nothing to copy. This can # happen if user request copying a directory w/o -r option, for example. have_work = False for v in src_uri_expansion.values(): if v: have_work = True break if not have_work: raise CommandException('Nothing to copy') # If multi-object copy request ensure base_dst_uri names a container. multi_src_request = (len(src_uri_expansion) > 1 or len(src_uri_expansion.values()[0]) > 1) if multi_src_request: self.InsistUriNamesContainer(command, base_dst_uri) # Ensure no src/dest pairs would overwrite src. Note that this is # more restrictive than the UNIX 'cp' command (which would, for example, # allow "mv * dir" and just skip the implied mv dir dir). We disallow such # partial completion operations in cloud copies because they are risky. for src_uri in iter(src_uri_expansion): for exp_src_uri in src_uri_expansion[src_uri]: new_dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri) if self.SrcDstSame(exp_src_uri, new_dst_uri): raise CommandException('cp: "%s" and "%s" are the same object - ' 'abort.' % (exp_src_uri.uri, new_dst_uri.uri)) return (base_dst_uri, multi_src_request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ConstructDstUri(self, src_uri, exp_src_uri, base_dst_uri):\n if base_dst_uri.names_container():\n # To match naming semantics of UNIX 'cp' command, copying files\n # to buckets/dirs should result in objects/files named by just the\n # final filename component; while copying directories should result\n # in objects/files mirroring the directory hierarchy. Example of the\n # first case:\n # gsutil cp dir1/file1 gs://bucket\n # should create object gs://bucket/file1\n # Example of the second case:\n # gsutil cp dir1/dir2 gs://bucket\n # should create object gs://bucket/dir2/file2 (assuming dir1/dir2\n # contains file2).\n if src_uri.names_container():\n dst_path_start = (src_uri.object_name.rstrip(os.sep)\n .rpartition(os.sep)[-1])\n start_pos = exp_src_uri.object_name.find(dst_path_start)\n dst_key_name = exp_src_uri.object_name[start_pos:]\n else:\n # src is a file or object, so use final component of src name.\n dst_key_name = os.path.basename(exp_src_uri.object_name)\n if base_dst_uri.is_file_uri():\n # dst names a directory, so append src obj name to dst obj name.\n dst_key_name = '%s%s%s' % (base_dst_uri.object_name, os.sep,\n dst_key_name)\n self.CheckForDirFileConflict(exp_src_uri, dst_key_name)\n else:\n # dest is an object or file: use dst obj name\n dst_key_name = base_dst_uri.object_name\n return base_dst_uri.clone_replace_name(dst_key_name)", "def HandleMultiSrcCopyRequst(self, src_uri_expansion, dst_uri):\n # If src_uri and dst_uri both name containers, handle\n # two cases to make copy command work like UNIX \"cp -r\" works:\n # a) if dst_uri names a non-existent directory, copy objects to a new\n # directory with the dst_uri name. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/a.\n # b) if dst_uri names an existing directory, copy objects under that\n # directory. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/bucket/a.\n src_uri_to_check = src_uri_expansion.keys()[0]\n if (src_uri_to_check.names_container() and dst_uri.names_container() and\n os.path.exists(dst_uri.object_name)):\n new_name = ('%s%s%s' % (dst_uri.object_name, os.sep,\n src_uri_to_check.bucket_name)).rstrip('/')\n dst_uri = dst_uri.clone_replace_name(new_name)\n # Create dest directory if needed.\n if dst_uri.is_file_uri() and not os.path.exists(dst_uri.object_name):\n os.makedirs(dst_uri.object_name)\n return dst_uri", "def PerformCopy(self, src_uri, dst_uri, sub_opts=None, headers=None, debug=0):\n # Make a copy of the input headers each time so we can set a different\n # MIME type for each object.\n if headers:\n headers = headers.copy()\n else:\n headers = {}\n\n src_key = src_uri.get_key(False, headers)\n if not src_key:\n raise CommandException('\"%s\" does not exist.' 
% src_uri)\n\n # Separately handle cases to avoid extra file and network copying of\n # potentially very large files/objects.\n\n if src_uri.is_cloud_uri() and dst_uri.is_cloud_uri():\n if src_uri.scheme == dst_uri.scheme:\n return self.CopyObjToObjSameProvider(src_key, src_uri, dst_uri,\n headers)\n else:\n return self.CopyObjToObjDiffProvider(sub_opts, src_key, src_uri,\n dst_uri, headers, debug)\n elif src_uri.is_file_uri() and dst_uri.is_cloud_uri():\n return self.UploadFileToObject(sub_opts, src_key, src_uri, dst_uri,\n headers, debug)\n elif src_uri.is_cloud_uri() and dst_uri.is_file_uri():\n return self.DownloadObjectToFile(src_key, src_uri, dst_uri, headers,\n debug)\n elif src_uri.is_file_uri() and dst_uri.is_file_uri():\n return self.CopyFileToFile(src_key, dst_uri, headers)\n else:\n raise CommandException('Unexpected src/dest case')", "def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name())\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.projectId',\n destination_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.destinationTable.tableId',\n destination_ref.Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.datasetId',\n source_ref.Parent().Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.projectId',\n source_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.tableId',\n source_ref.Name())\n return request", "def copy(self, dest):\n if not isinstance(dest, Request):\n raise ValueError(\"'%s' should be a sub-class of 'Request'\" % dest)\n return dest.update_url(self.url).update_verb(self.verb)\\\n .update_params(self.params).update_data(self.data)", "def copy(self, target_uri_type=None, target_uri=None, soft_link=False,\n no_copy=False):\n # XOR: only one of target_uri and target_uri_type\n # should be specified\n assert((target_uri is None) != (target_uri_type is None))\n if target_uri is None:\n path = None\n uri_type = target_uri_type\n elif isinstance(target_uri, CaperURI):\n path = target_uri.get_uri()\n uri_type = target_uri.uri_type\n else:\n path = target_uri\n uri_type = CaperURI.__get_uri_type(target_uri)\n\n if path is None and uri_type == self._uri_type:\n return self._uri\n\n # here, path is target path\n # get target path\n if uri_type == URI_URL:\n assert(path is None and no_copy)\n path = self.__get_url()\n method = 'url_forming'\n\n elif uri_type == URI_GCS:\n if path is None:\n path = self.__get_gcs_file_name()\n\n elif uri_type == URI_S3:\n if path is None:\n path = self.__get_s3_file_name()\n\n elif uri_type == URI_LOCAL:\n if path is None:\n path = self.__get_local_file_name()\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n else:\n raise NotImplementedError('uri_type: {}'.format(uri_type))\n\n # special treatment for URL to cloud (gcs, s3)\n if uri_type in (URI_GCS, URI_S3) and \\\n self._uri_type == URI_URL:\n # # there is no way to get URL's file size before it's downloaded\n # # (since \"Content-Length\" header is optional)\n # # and not all websites support it (e.g. 
AWS)\n # wait until .lock file disappears\n # cu_target = CaperURI(path)\n # cu_target.__wait_for_lock()\n # if cu_target.file_exists() and \\\n # self.get_file_size() == cu_target.get_file_size():\n # if CaperURI.VERBOSE:\n # print('[CaperURI] copying skipped, '\n # 'target: {target}'.format(target=path))\n # return cu_target\n\n # URL to local and then local to cloud\n tmp_local_f = CaperURI(self._uri).get_file(\n uri_type=URI_LOCAL, no_copy=no_copy)\n return CaperURI(tmp_local_f).copy(target_uri=path,\n no_copy=no_copy)\n if soft_link:\n if uri_type == URI_GCS and self._uri_type == URI_GCS:\n return self._uri\n elif uri_type == URI_S3 and self._uri_type == URI_S3:\n return self._uri\n\n if CaperURI.VERBOSE and uri_type not in (URI_URL,):\n if soft_link and self._uri_type == URI_LOCAL \\\n and uri_type == URI_LOCAL:\n method = 'symlinking'\n else:\n method = 'copying'\n print('[CaperURI] {method} from '\n '{src} to {target}, src: {uri}'.format(\n method=method,\n src=self._uri_type, target=uri_type, uri=self._uri))\n\n action = 'skipped'\n if not no_copy:\n assert(path is not None)\n # wait until .lock file disappears\n cu_target = CaperURI(path)\n cu_target.__wait_for_lock()\n\n # if target file not exists or file sizes are different\n # then do copy!\n if uri_type not in (URI_URL,) and (not cu_target.file_exists() or \\\n self.get_file_size() != cu_target.get_file_size()):\n\n action = 'done'\n cu_lock = CaperURI(path + CaperURI.LOCK_EXT)\n try:\n # create an empty .lock file\n cu_lock.write_str_to_file('', quiet=True)\n\n # do copy\n if uri_type == URI_GCS:\n if self._uri_type == URI_URL:\n assert(False)\n\n elif self._uri_type == URI_GCS or \\\n self._uri_type == URI_S3 \\\n or self._uri_type == URI_LOCAL:\n check_call(['gsutil', '-q', 'cp', self._uri, path])\n else:\n path = None\n\n elif uri_type == URI_S3:\n if self._uri_type == URI_URL:\n assert(False)\n\n elif self._uri_type == URI_GCS:\n check_call(['gsutil', '-q', 'cp', self._uri, path])\n\n elif self._uri_type == URI_S3 or \\\n self._uri_type == URI_LOCAL:\n if CaperURI.USE_GSUTIL_OVER_AWS_S3:\n check_call(['gsutil', '-q', 'cp',\n self._uri, path])\n else:\n check_call(['aws', 's3', 'cp',\n '--only-show-errors',\n self._uri, path])\n else:\n path = None\n\n elif uri_type == URI_LOCAL:\n if self._uri_type == URI_LOCAL:\n if soft_link:\n if CaperURI.VERBOSE:\n method = 'symlinking'\n try:\n os.symlink(self._uri, path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(path)\n os.symlink(self._uri, path)\n else:\n if CaperURI.VERBOSE:\n method = 'copying'\n shutil.copy2(self._uri, path)\n\n elif self._uri_type == URI_URL:\n # we need \"curl -C -\" to resume downloading\n # but it always fails with HTTP ERR 416 when file\n # is already fully downloaded, i.e. 
path exists\n _, _, _, http_err = CaperURI.__curl_auto_auth(\n ['curl', '-RL', '-f', '-C', '-',\n self._uri, '-o', path],\n ignored_http_err=(416,))\n if http_err in (416,):\n action = 'skipped'\n\n elif self._uri_type == URI_GCS or \\\n self._uri_type == URI_S3 and \\\n CaperURI.USE_GSUTIL_OVER_AWS_S3:\n check_call(['gsutil', '-q', 'cp', self._uri, path])\n elif self._uri_type == URI_S3:\n check_call(['aws', 's3', 'cp',\n '--only-show-errors',\n self._uri, path])\n else:\n path = None\n\n else:\n raise NotImplementedError('uri_type: {}'.format(\n uri_type))\n\n if path is None:\n raise NotImplementedError('uri_types: {}, {}'.format(\n self._uri_type, uri_type))\n finally:\n # remove .lock file\n cu_lock.rm(quiet=True)\n\n if CaperURI.VERBOSE and uri_type not in (URI_URL,):\n print('[CaperURI] {method} {action}, target: {target}'.format(\n method=method, action=action, target=path))\n return path", "def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)", "def modifySrcDstForZipDownload(src, dstBase): \n\n src = src + \"?format=zip\"\n dst = os.path.join(dstBase , 'projects' + \n src.replace('?format=zip', '').\\\n split('projects')[1].split('/files')[0] + '/files.zip')\n return src, dst", "def _remote_copy(self, source, destn):\n s = remote_copy(host_ip=self.ip, username=self.username,\n password=self.password, source=source, destn=destn)\n\n if s.get('status') == \"Failed\":\n raise AssertionError(s.get('error', \"Error encountered\"))\n\n return s", "def run_copy(self, src, dst):\n pass", "def copy(self, src_path: str, tgt_path: str) -> None:", "def _map_base_url(base):\n # This uses eventual consistency and cannot be made strongly consistent.\n for rule in models_chromium.UrlMap.query().order(\n models_chromium.UrlMap.base_url_template):\n base_template = r'^%s$' % rule.base_url_template\n match = re.match(base_template, base)\n if not match:\n continue\n try:\n src_url = re.sub(base_template,\n rule.source_code_url_template,\n base)\n except re.error, err:\n logging.error('err: %s base: \"%s\" rule: \"%s\" => \"%s\"',\n err, base, rule.base_url_template,\n rule.source_code_url_template)\n return None\n return src_url\n return None", "def copy_db(src=FRESHDB, dst=[APPDB]):\n for dest in dst:\n try:\n x = shutil.copy2(src, dest)\n print('File copied to {}'.format(x))\n except shutil.SameFileError:\n print('Both source and destination are identical.')", "def CopyObjsCommand(self, args, sub_opts=None, headers=None, debug=0,\n command='cp'):\n # Expand wildcards and containers in source StorageUris.\n src_uri_expansion = self.ExpandWildcardsAndContainers(\n args[0:len(args)-1], sub_opts, headers, debug)\n\n # Check for various problems and determine base_dst_uri based for request.\n (base_dst_uri, multi_src_request) = self.ErrorCheckCopyRequest(\n src_uri_expansion, args[-1], headers, debug, command)\n # Rewrite base_dst_uri and create dest dir as needed for multi-source copy.\n if 
multi_src_request:\n base_dst_uri = self.HandleMultiSrcCopyRequst(src_uri_expansion,\n base_dst_uri)\n\n # Now iterate over expanded src URIs, and perform copy operations.\n total_elapsed_time = total_bytes_transferred = 0\n for src_uri in iter(src_uri_expansion):\n for exp_src_uri in src_uri_expansion[src_uri]:\n print 'Copying %s...' % exp_src_uri\n dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri)\n (elapsed_time, bytes_transferred) = self.PerformCopy(\n exp_src_uri, dst_uri, sub_opts, headers, debug)\n total_elapsed_time += elapsed_time\n total_bytes_transferred += bytes_transferred\n if debug == 3:\n # Note that this only counts the actual GET and PUT bytes for the copy\n # - not any transfers for doing wildcard expansion, the initial HEAD\n # request boto performs when doing a bucket.get_key() operation, etc.\n if total_bytes_transferred != 0:\n print 'Total bytes copied=%d, total elapsed time=%5.3f secs (%sps)' % (\n total_bytes_transferred, total_elapsed_time,\n MakeHumanReadable(float(total_bytes_transferred) /\n float(total_elapsed_time)))", "def copy(self, source_host, dest_host, filename):", "async def copy(self, _id: str, dst_id: str, *,\n rev: Optional[str] = None,\n dst_rev: Optional[str] = None,\n batch: Optional[bool] = None) -> dict:\n\n headers = dict(\n Destination=f'{dst_id}?rev={dst_rev}' if dst_rev else dst_id\n )\n\n params = dict(\n rev=rev,\n batch=\"ok\" if batch else None,\n )\n\n return await self.__connection.query('COPY', self._get_path(_id), params=params, headers=headers)", "def copy_node(self, job):\n transfer = Transfer(job.jobInfo)\n target = transfer.target\n direction = transfer.direction\n result = None\n # Check uris\n check_uri(target, self.sm, shouldExist = True)\n checks = check_uri(direction, self.sm, shouldExist = False)\n # Retrieve existing record\n node = self.sm.get_node(target)[0]['node']\n node = self.nf.get_node(node)\n # Check whether endpoint is reserved URI\n if direction.endswith(AUTO): \n direction = generate_uri(direction)\n result = {'destination': direction}\n # Check if endpoint is a container\n if checks['exists'] and checks['container']: direction += target[target.rfind('/'):]\n # Copy it\n old_location = self.sm.get_location(target)[0]['location']\n new_location = self._create_copy(node, direction)\n # Check if target is a container\n if isinstance(node, ContainerNode):\n # Copy children\n for child in self.sm.get_all_children(target):\n child_node = self.nf.get_node(self.sm.get_node(child)[0]['node'])\n new_uri = child_node.uri.replace(target, direction)\n self._create_copy(child_node, new_uri)\n # Copy bytes \n if isinstance(node, ContainerNode):\n shutil.copytree(old_location, new_location)\n else:\n shutil.copy(old_location, new_location)\n return result", "def copy(self):\r\n ret=' '\r\n if self.REQUEST.SESSION.has_key('my_path'):\r\n\t zpath=self.REQUEST.SESSION['my_path'].replace('toolbox_root','').strip('/')\r\n\t #ret=zpath\r\n\t if self.REQUEST.SESSION.has_key('copy_bild'):\r\n\t\t cp_bild=self.REQUEST.SESSION['copy_bild'].split('/')[-1].strip('/')\r\n\t\t cp_path=str('/').join(self.REQUEST.SESSION['copy_bild'].split('/')[0:-1])\r\n\t\t #ret+=' '+cp_path+' '+cp_bild\r\n\t\t if cp_path!=zpath:\r\n\t\t \tn_id=search_id(self,self.restrictedTraverse(zpath).objectValues('Image'))\r\n\t\t \t#ret+=' '+n_id\r\n\t\t\tfor x in liste_val:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tfor obj in self.restrictedTraverse(cp_path).objectValues('Image'):\r\n\t\t\t\t\t if 
str(obj.getId())[0:6]==cp_bild:\r\n\t\t\t\t\t\tmy_clip=self.restrictedTraverse(cp_path).manage_copyObjects([obj.getId()])\r\n\t\t\t\t\t\tcopied=self.restrictedTraverse(zpath).manage_pasteObjects(my_clip)\r\n\t\t\t\t\t\t#ret+=' new id : '+str(copied[0]['new_id'])\r\n\t\t\t\t\t\t#if str(copied[0]['new_id']).split('_')[0]!=n_id:\r\n\t\t\t\t\t\t#\tself.restrictedTraverse(zpath).manage_renameObjects([str(copied[0]['new_id'])],[str(n_id+x)])\r\n\t\t\t\t\t\t\t#ret +=' False '\r\n\t\t\t\t\t\t#ret+='<br>\\n'\r\n\t\t\t\texcept:\r\n\t\t\t\t\tret+=''\r\n else:\r\n\t ret=' '\r\n return ' '", "def src_to_dst(self,src_uri):\n m=re.match(self.src_root+\"(.*)$\",src_uri)\n if (m is None):\n raise \"FIXME - Does not match\"\n rel_path=m.group(1)\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace('/',os.sep)\n return(self.dst_root+rel_path)", "def svn_info_t_copyfrom_url_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' + '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "def _copy(self, src, dest):\n\t\ttry:\n\t\t\tself.bucket.copy_key(dest, self.bucket.name, src)\n\t\texcept boto.exception.S3CopyError as e:\n\t\t\tself.log.debug(\"bucket copy failed for on %s failed\", dest, exc_info=True)\n\t\t\traise e", "def svn_client_commit_item_t_copyfrom_url_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def _fwd_set_ruri(target, fwd_result):\n # The Request-URI in the copy's start line MUST be replaced with the URI for this target. 
If the URI contains\n # any parameters not allowed in a Request-URI, they MUST be removed.\n sip_msg, opts = fwd_result\n sip_msg.ruri = target.clear_not_allowed_parts('ruri')\n return sip_msg, opts", "def copy_object(self, source_bucket, source_key, dest_bucket, dest_key) -> None:\n self.resource.Object(dest_bucket, dest_key).copy_from(CopySource=path.join(source_bucket, source_key))", "def svn_client_commit_item2_t_copyfrom_url_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def getFile(self, _src, _dst):\n\n #--------------------\n # Reset total size of downloads for all files\n #-------------------------\n self.downloadTracker['totalDownloadSize']['bytes'] = 0\n self.downloadTracker['downloadedSize']['bytes'] = 0\n downloadFolders = []\n\n #-------------------------\n # Remove existing dst files from their local URI\n #-------------------------\n if os.path.exists(_dst):\n os.remove(_dst)\n self.__getFile_requests(_src, _dst)", "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst", "def _base_uri(self) -> str:\n if self.use_original_uri:\n header_value = self.use_original_uri.get(\"header_value\")\n conditions = self.use_original_uri.get(\"claim_conditions\")\n if conditions.get(\"any\"):\n uri = self.request.headers.get(header_value)\n else:\n key = self.claims.get(conditions.get(\"claim_key\"))\n val = self.claims.get(conditions.get(\"claim_value\"))\n if self.claims.get(key) == val:\n uri = self.request.headers.get(header_value)\n else:\n uri = self.request.uri\n else:\n uri = self.request.uri\n if not uri:\n uri = self.request.uri\n return uri.split(\"?\")[0]", "def _cached_copy(self, src, dst, perm=None, cache=None, prefer_cache=False, validate=None,\n **kwargs):\n if self.cache is None:\n cache = False\n elif cache is None:\n cache = self.use_cache\n else:\n cache = bool(cache)\n\n # ensure absolute paths\n src = self.abspath(src)\n dst = dst and self.abspath(dst) or None\n\n # determine the copy mode for code readability\n # (remote-remote: \"rr\", remote-local: \"rl\", remote-cache: \"rc\", ...)\n src_local = self.is_local(src)\n dst_local = dst and self.is_local(dst)\n mode = \"rl\"[src_local] + (\"rl\"[dst_local] if dst is not None else \"c\")\n\n # disable caching when the mode is local-local, local-cache or remote-remote\n if mode in (\"ll\", \"lc\", \"rr\"):\n cache = False\n\n # dst can be None, but in this case, caching should be enabled\n if dst is None and not cache:\n raise Exception(\"copy destination must not be empty when caching is disabled\")\n\n if not cache:\n # simply copy and return the dst path\n return self._atomic_copy(src, dst, perm=perm, validate=validate, **kwargs)\n\n kwargs_no_retries = kwargs.copy()\n kwargs_no_retries[\"retries\"] = 0\n\n # handle 3 cases: lr, rl, rc\n if mode == \"lr\":\n # strategy: copy to remote, copy to cache, sync stats\n\n # copy to remote, no need to validate as we compute the stat anyway\n dst_uri = self._atomic_copy(src, dst, perm=perm, validate=False, **kwargs)\n rstat = self.stat(dst, **kwargs_no_retries)\n\n # remove 
the cache entry\n if dst in self.cache:\n logger.debug(\"removing destination file {} from cache\".format(dst))\n self.cache.remove(dst)\n\n # allocate cache space and copy to cache\n lstat = self.local_fs.stat(src)\n self.cache.allocate(lstat.st_size)\n cdst_uri = add_scheme(self.cache.cache_path(dst), \"file\")\n with self.cache.lock(dst):\n logger.debug(\"loading source file {} to cache\".format(src))\n self._atomic_copy(src, cdst_uri, validate=False)\n self.cache.touch(dst, (int(time.time()), rstat.st_mtime))\n\n return dst_uri\n\n else: # rl, rc\n # strategy: copy to cache when not up to date, sync stats, opt. copy to local\n\n # build the uri to the cache path of the src file\n csrc_uri = add_scheme(self.cache.cache_path(src), \"file\")\n\n # if the file is cached and prefer_cache is true,\n # return the cache path, no questions asked\n # otherwise, check if the file is there and up to date\n if not prefer_cache or src not in self.cache:\n with self.cache.lock(src):\n # in cache and outdated?\n rstat = self.stat(src, **kwargs_no_retries)\n if src in self.cache and not self.cache.check_mtime(src, rstat.st_mtime):\n logger.debug(\"source file {} is outdated in cache, removing\".format(src))\n self.cache.remove(src, lock=False)\n # in cache at all?\n if src not in self.cache:\n self.cache.allocate(rstat.st_size)\n self._atomic_copy(src, csrc_uri, validate=validate, **kwargs)\n logger.debug(\"loading source file {} to cache\".format(src))\n self.cache.touch(src, (int(time.time()), rstat.st_mtime))\n\n if mode == \"rl\":\n # simply use the local_fs for copying\n self.local_fs.copy(csrc_uri, dst, perm=perm)\n return dst\n\n # mode is rc\n return csrc_uri" ]
[ "0.7059639", "0.6167912", "0.5738104", "0.5286239", "0.5260773", "0.5251806", "0.51933813", "0.51639533", "0.51353896", "0.5022377", "0.497357", "0.49711338", "0.4926278", "0.49253422", "0.49151328", "0.4913535", "0.48611373", "0.48356867", "0.48299488", "0.4821887", "0.48125136", "0.47991696", "0.47720295", "0.47717524", "0.47566757", "0.4731375", "0.47126076", "0.4706348", "0.46991476", "0.4692716" ]
0.67942697
1
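Editor's illustration (not part of the dataset row above): the preceding row pairs the ErrorCheckCopyRequest docstring with the gsutil implementation that resolves a possibly wildcarded destination, rejects empty expansions, requires a container destination for multi-source copies, and refuses src/dst pairs that name the same object. A small local-path sketch of the same guard logic follows; the name error_check_copy_request and the ValueError-based reporting are assumptions for illustration and do not reflect gsutil's actual error handling.

    import os

    def error_check_copy_request(src_paths, dst_path):
        # Illustrative sketch only (assumed helper, not gsutil code).
        if not src_paths:
            raise ValueError('Nothing to copy')
        # A copy with multiple sources must target a directory ("container").
        multi_src_request = len(src_paths) > 1
        if multi_src_request and not os.path.isdir(dst_path):
            raise ValueError('Destination must name a directory for the '
                             'multiple source form of "cp".')
        # Refuse any src/dst pair that would overwrite the source in place.
        for src in src_paths:
            dst = (os.path.join(dst_path, os.path.basename(src))
                   if os.path.isdir(dst_path) else dst_path)
            if os.path.abspath(src) == os.path.abspath(dst):
                raise ValueError('cp: "%s" and "%s" are the same object - abort.'
                                 % (src, dst))
        return dst_path, multi_src_request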
Rewrites dst_uri and creates the destination directory as needed, if this is a multi-source copy.
def HandleMultiSrcCopyRequst(self, src_uri_expansion, dst_uri): # If src_uri and dst_uri both name containers, handle # two cases to make copy command work like UNIX "cp -r" works: # a) if dst_uri names a non-existent directory, copy objects to a new # directory with the dst_uri name. In this case, # gsutil gs://bucket/a dir # should create dir/a. # b) if dst_uri names an existing directory, copy objects under that # directory. In this case, # gsutil gs://bucket/a dir # should create dir/bucket/a. src_uri_to_check = src_uri_expansion.keys()[0] if (src_uri_to_check.names_container() and dst_uri.names_container() and os.path.exists(dst_uri.object_name)): new_name = ('%s%s%s' % (dst_uri.object_name, os.sep, src_uri_to_check.bucket_name)).rstrip('/') dst_uri = dst_uri.clone_replace_name(new_name) # Create dest directory if needed. if dst_uri.is_file_uri() and not os.path.exists(dst_uri.object_name): os.makedirs(dst_uri.object_name) return dst_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def src_to_dst(self,src_uri):\n m=re.match(self.src_root+\"(.*)$\",src_uri)\n if (m is None):\n raise \"FIXME - Does not match\"\n rel_path=m.group(1)\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace('/',os.sep)\n return(self.dst_root+rel_path)", "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst", "def ConstructDstUri(self, src_uri, exp_src_uri, base_dst_uri):\n if base_dst_uri.names_container():\n # To match naming semantics of UNIX 'cp' command, copying files\n # to buckets/dirs should result in objects/files named by just the\n # final filename component; while copying directories should result\n # in objects/files mirroring the directory hierarchy. Example of the\n # first case:\n # gsutil cp dir1/file1 gs://bucket\n # should create object gs://bucket/file1\n # Example of the second case:\n # gsutil cp dir1/dir2 gs://bucket\n # should create object gs://bucket/dir2/file2 (assuming dir1/dir2\n # contains file2).\n if src_uri.names_container():\n dst_path_start = (src_uri.object_name.rstrip(os.sep)\n .rpartition(os.sep)[-1])\n start_pos = exp_src_uri.object_name.find(dst_path_start)\n dst_key_name = exp_src_uri.object_name[start_pos:]\n else:\n # src is a file or object, so use final component of src name.\n dst_key_name = os.path.basename(exp_src_uri.object_name)\n if base_dst_uri.is_file_uri():\n # dst names a directory, so append src obj name to dst obj name.\n dst_key_name = '%s%s%s' % (base_dst_uri.object_name, os.sep,\n dst_key_name)\n self.CheckForDirFileConflict(exp_src_uri, dst_key_name)\n else:\n # dest is an object or file: use dst obj name\n dst_key_name = base_dst_uri.object_name\n return base_dst_uri.clone_replace_name(dst_key_name)", "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst", "def prepare(self, dst, options):\n self.checkExisting(dst)\n self.makedirs(dst.parent())", "def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)", "def modifySrcDstForZipDownload(src, dstBase): \n\n src = src + \"?format=zip\"\n dst = os.path.join(dstBase , 'projects' + \n src.replace('?format=zip', '').\\\n split('projects')[1].split('/files')[0] + '/files.zip')\n return src, dst", "def copy_to_se(self, src, dst, create_parent_directory=True):\n mgm, dst = self._safe_split_mgm(dst)\n dst = self._join_mgm_lfn(mgm, dst)\n if create_parent_directory:\n parent_directory = osp.dirname(dst)\n self.create_directory(parent_directory)\n logger.warning('Copying {0} to {1}'.format(src, dst))\n cmd = [ 'xrdcp', '-s', src, dst ]\n svj.core.utils.run_command(cmd)", "def dst_to_src(self,dst_file):\n 
rel_path=os.path.relpath(dst_file,start=self.dst_root)\n if (rel_path == '.'):\n rel_path=''\n else:\n rel_path= '/'+rel_path\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace(os.sep,'/')\n return(self.src_root+rel_path)", "def _copy_dir(src, dst):\n if os.path.isdir(src):\n os.makedirs(dst, exist_ok=True)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n\n if os.path.isdir(s):\n _copy_dir(s, d)\n else:\n shutil.copy2(s, d)\n\n else:\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n _delete_file(dst)\n shutil.copy2(src, dst)", "def putFolder(self, _dst):\n if not _dst.startswith(self.host + '/data'):\n if not _dst.startswith('/'):\n _dst = '/' + _dst\n _dst = self.host + '/data' + _dst\n #print(f\"\\n\\nXNAT 1 {_dst}\")\n _dst = str(Xnat.path.cleanUri(_dst)).encode('ascii', 'ignore')\n #print(f\"fXNAT 2 {_dst} \\n\\n\")\n response = self.__httpsRequest('PUT', _dst)\n return response", "def copy_file_to_multiple_subfolders(src, dst, *args, **kwargs):\n print '\\nSource: {}\\nDestinations parent folder: {}'.format(src, dst)\n filename = os.path.basename(src)\n for folder in (d for d in os.listdir(dst) if os.path.isdir(d)):\n print '\\nCopying {} to {}...'.format(filename, folder)\n try:\n shutil.copy(src, os.path.abspath(dst) + '\\\\' + folder)\n except Exception as e:\n print e", "def clone(src: str, dst: str):\n if dst is None:\n dst = getcwd()\n destination = path.abspath(dst)\n # TODO: replace with false this is just for testing:\n makedirs(destination, exist_ok=True)\n\n sync_chunk(src, destination)\n copy(src, destination)", "def safecopy(src, dst):\r\n abs_src = os.path.abspath(src)\r\n abs_dst = os.path.abspath(dst)\r\n if (abs_src != abs_dst) \\\r\n and os.path.isfile(abs_src): \r\n dirname = os.path.dirname(abs_dst)\r\n recurse_mkdir(dirname)\r\n shutil.copy(abs_src, abs_dst)", "def SrcDstSame(self, src_uri, dst_uri):\n if src_uri.is_file_uri() and dst_uri.is_file_uri():\n # Translate a/b/./c to a/b/c, so src=dst comparison below works.\n new_src_path = re.sub('%s+\\.%s+' % (os.sep, os.sep), os.sep,\n src_uri.object_name)\n new_src_path = re.sub('^.%s+' % os.sep, '', new_src_path)\n new_dst_path = re.sub('%s+\\.%s+' % (os.sep, os.sep), os.sep,\n dst_uri.object_name)\n new_dst_path = re.sub('^.%s+' % os.sep, '', new_dst_path)\n return (src_uri.clone_replace_name(new_src_path).uri ==\n dst_uri.clone_replace_name(new_dst_path).uri)\n else:\n return src_uri.uri == dst_uri.uri", "def copydir(src, dst):\n for item in os.listdir(src):\n s, d = os.path.join(src, item), os.path.join(dst, item)\n if os.path.isdir(s):\n if not os.path.isdir(d):\n os.mkdir(d)\n copydir(s, d)\n else:\n shutil.copy(s, d)", "def copy_dir(src, dst):\n try:\n debug.log(\"copy dir from \"+ src, \"to \"+ dst)\n shutil.copytree(src, dst)\n except Exception as e:\n debug.log(\"Error: happened while copying!\\n%s\\n\"%e)", "def copy_deep(src: str, dst: str, create_dst_dir: bool = False) -> None:\n system_is_darwin = platform.system().lower() == \"darwin\"\n if create_dst_dir:\n mkdir_p(os.path.dirname(dst))\n src_is_link = os.path.islink(src)\n dst_exists = os.path.lexists(dst)\n if os.path.isdir(src) and not src_is_link:\n logging.debug(\"Copying directory {} to {}\".format(src, dst))\n mkdir_p(dst)\n for name in os.listdir(src):\n copy_deep(os.path.join(src, name), os.path.join(dst, name))\n elif src_is_link:\n if dst_exists:\n return\n target = os.readlink(src)\n logging.debug(\"Creating symlink {} -> 
{}\".format(dst, target))\n os.symlink(target, dst)\n else:\n if dst_exists:\n if not system_is_darwin:\n return\n # Only overwrite the file if the source is newer than the destination.\n if os.path.getmtime(src) <= os.path.getmtime(dst):\n return\n logging.debug(\"Copying file {} to {}\".format(src, dst))\n # Preserve the file attributes.\n shutil.copy2(src, dst)", "def copy_one(self, src, dest):\n if self.manager.no_sourcemaps and self.is_ignored_sourcemap(src.name):\n return\n\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n copytree_kwargs = {}\n\n if self.manager.no_sourcemaps:\n copytree_kwargs[\"ignore\"] = SOURCEMAP_IGNORE_PATTERNS\n\n if src.is_dir():\n shutil.copytree(src, dest, **copytree_kwargs)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)", "def copy_to_site():\n if os.path.exists(SITE_GFR):\n rmtree(SITE_GFR)\n\n try:\n os.makedirs(SITE_GFR)\n except OSError as e:\n # We don't care if it already exists although it shouldn't exist.\n if e.errno != errno.EEXIST:\n raise\n\n for route in os.listdir(GFR_ROUTES_LOCATION):\n copyfile(GFR_ROUTES_LOCATION + route, SITE_GFR + route)\n\n if os.path.exists(SITE_OUTPUT):\n rmtree(SITE_OUTPUT)\n\n try:\n os.makedirs(SITE_OUTPUT)\n except OSError as e:\n # We don't care if it already exists although it shouldn't exist.\n if e.errno != errno.EEXIST:\n raise\n\n for route in os.listdir(OUTPUT_LOCATION):\n copyfile(OUTPUT_LOCATION + route, SITE_OUTPUT + route)", "def copyseries(self, suuid, targetdir):\n seriesfiles = self.controller.db.getFiles(suuid)\n dest = False\n if seriesfiles is not None and len(seriesfiles) > 0:\n dest = join(targetdir, suuid)\n if not exists(dest):\n mkdir(dest)\n for f in seriesfiles:\n if not exists(join(dest, basename(f))):\n shutil.copy(f, dest)\n return dest", "def handle_multiple_destinations(self):\n\n # Create the to-directory if it does not exist\n for destination in config.dest:\n if not path.exists(destination.dest):\n makedirs(destination.dest)\n\n # Clone the modules and copy the right directories\n for module in config.modules:\n Logger.assemble_module(module)\n\n directory = path.join(TEMP_DIR, module.name)\n remove_dir(directory)\n clone(module, directory)\n self.commit_hashes[module.name] = self.get_commit_hash(directory)\n\n for destination in config.dest:\n to_directory = path.join(destination.dest, module.name)\n remove_dir(to_directory)\n shutil.move(\n path.join(TEMP_DIR, module.name, destination.src), to_directory\n )", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def PerformCopy(self, src_uri, dst_uri, sub_opts=None, headers=None, debug=0):\n # Make a copy of the input headers each time so we can set a different\n # MIME type for each object.\n if headers:\n headers = headers.copy()\n else:\n headers = {}\n\n src_key = src_uri.get_key(False, headers)\n if not src_key:\n raise CommandException('\"%s\" does not exist.' 
% src_uri)\n\n # Separately handle cases to avoid extra file and network copying of\n # potentially very large files/objects.\n\n if src_uri.is_cloud_uri() and dst_uri.is_cloud_uri():\n if src_uri.scheme == dst_uri.scheme:\n return self.CopyObjToObjSameProvider(src_key, src_uri, dst_uri,\n headers)\n else:\n return self.CopyObjToObjDiffProvider(sub_opts, src_key, src_uri,\n dst_uri, headers, debug)\n elif src_uri.is_file_uri() and dst_uri.is_cloud_uri():\n return self.UploadFileToObject(sub_opts, src_key, src_uri, dst_uri,\n headers, debug)\n elif src_uri.is_cloud_uri() and dst_uri.is_file_uri():\n return self.DownloadObjectToFile(src_key, src_uri, dst_uri, headers,\n debug)\n elif src_uri.is_file_uri() and dst_uri.is_file_uri():\n return self.CopyFileToFile(src_key, dst_uri, headers)\n else:\n raise CommandException('Unexpected src/dest case')", "def link(self, dst):\n assert isinstance(dst, Path)\n link(dst._path, self._path)", "def _copy_to_media(self, template_name, source=''):\n dirpath = os.path.join(self.cache_root, os.path.dirname(template_name))\n filename = os.path.basename(template_name)\n fullpath = os.path.join(dirpath, filename)\n\n if not os.path.isfile(fullpath) or settings.DEBUG:\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n f = open(fullpath, 'w')\n f.write(source)\n f.close()\n\n return urljoin(self.cache_url, template_name), filename", "def copyAsset(self, src, dst, **kw):\n if self.isfile(src):\n self.copyfile(src, dst)\n else:\n # copy folder\n if not self.exists(dst):\n self.makedirs(dst)\n for name in self.listdir(src):\n self.copyAsset(self.joinpath(src, name), self.joinpath(dst, name), copycache=0)\n\n # copy cache\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return\n\n cache_dst = self.cache_path(dst)\n cache_dst_parent = os.path.dirname(cache_dst)\n if not os.path.exists( cache_dst_parent ):\n os.makedirs(cache_dst_parent )\n if not os.path.exists(cache_dst):\n ucopytree(cache_src, cache_dst)", "def MakeDestinationDirectories(self, dst_files):\n for dst in dst_files:\n path = os.path.dirname(dst);\n if (len(path) > 0) and (not os.path.exists(path)):\n self.VerboseMsg(\"Make Directory: \" + path)\n if self.execute:\n os.makedirs(path)", "def m_cp(*args):\n if (not args or len(args) < 2) :\n print(\"parameter is invalid\")\n return\n\n src = args[0:len(args)-1]\n dst = args[len(args)-1]\n\n if not os.path.exists(dst) :\n os.mkdir(dst)\n\n for s in src:\n print(\"cp %s ==> %s\" % (s, dst))\n cpfile(s, dst)", "def copy(src, dst):\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n shutil.copy2(src, dst)" ]
[ "0.67312545", "0.6727234", "0.6659769", "0.6657753", "0.5765052", "0.57490075", "0.57084966", "0.56707317", "0.54465216", "0.5427129", "0.5386717", "0.5382597", "0.5380205", "0.5367291", "0.53512245", "0.5320187", "0.5288889", "0.526838", "0.5250299", "0.52383864", "0.5235054", "0.51928693", "0.5184047", "0.5170785", "0.5149981", "0.5141188", "0.51245785", "0.51202434", "0.5116604", "0.51111454" ]
0.6953039
0
Checks if src_uri and dst_uri represent same object. We don't handle anything about hard or symbolic links.
def SrcDstSame(self, src_uri, dst_uri): if src_uri.is_file_uri() and dst_uri.is_file_uri(): # Translate a/b/./c to a/b/c, so src=dst comparison below works. new_src_path = re.sub('%s+\.%s+' % (os.sep, os.sep), os.sep, src_uri.object_name) new_src_path = re.sub('^.%s+' % os.sep, '', new_src_path) new_dst_path = re.sub('%s+\.%s+' % (os.sep, os.sep), os.sep, dst_uri.object_name) new_dst_path = re.sub('^.%s+' % os.sep, '', new_dst_path) return (src_uri.clone_replace_name(new_src_path).uri == dst_uri.clone_replace_name(new_dst_path).uri) else: return src_uri.uri == dst_uri.uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if type(other) != URI:\n return False\n return (self._scheme == other._scheme \n and self._host == other._host \n and self._port == other._port \n and self._path == other._path\n and self._query == other._query\n and self._isRegularURI == other._isRegularURI)", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, URI) and str(self) == str(other)", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, URI):\n return NotImplemented\n return str(self) == str(other)", "def equals(self, uri):\r\n return self.uri == uri.uri", "def is_broken_link(self):\n if not os.path.exists(self.dst):\n if os.path.lexists(self.dst):\n return True\n return False", "def compare(src, dest):\n xsrc, xdest = os.path.exists(src), os.path.exists(dest)\n if not xsrc:\n return Cmp.nosrc\n if not xdest:\n return Cmp.nodest\n with open(src, \"rb\") as s:\n csrc = sha256(s.read()).digest()\n if xdest:\n with open(dest, \"rb\") as d:\n cdest = sha256(d.read()).digest()\n else:\n cdest = b\"\"\n if csrc == cdest:\n return Cmp.same\n return Cmp.differ", "def HandleMultiSrcCopyRequst(self, src_uri_expansion, dst_uri):\n # If src_uri and dst_uri both name containers, handle\n # two cases to make copy command work like UNIX \"cp -r\" works:\n # a) if dst_uri names a non-existent directory, copy objects to a new\n # directory with the dst_uri name. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/a.\n # b) if dst_uri names an existing directory, copy objects under that\n # directory. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/bucket/a.\n src_uri_to_check = src_uri_expansion.keys()[0]\n if (src_uri_to_check.names_container() and dst_uri.names_container() and\n os.path.exists(dst_uri.object_name)):\n new_name = ('%s%s%s' % (dst_uri.object_name, os.sep,\n src_uri_to_check.bucket_name)).rstrip('/')\n dst_uri = dst_uri.clone_replace_name(new_name)\n # Create dest directory if needed.\n if dst_uri.is_file_uri() and not os.path.exists(dst_uri.object_name):\n os.makedirs(dst_uri.object_name)\n return dst_uri", "def __eq__(self, other):\n return type(self) == type(other) and self._full_path == other.full_path", "def is_allow(self, src: Vertex, dst: Vertex) -> bool:\n if self.link == Link.NONE:\n return False\n elif self.link == Link.BI:\n return src in self.vertices or dst in self.vertices\n return src == self.src and dst == self.dst", "def ConstructDstUri(self, src_uri, exp_src_uri, base_dst_uri):\n if base_dst_uri.names_container():\n # To match naming semantics of UNIX 'cp' command, copying files\n # to buckets/dirs should result in objects/files named by just the\n # final filename component; while copying directories should result\n # in objects/files mirroring the directory hierarchy. 
Example of the\n # first case:\n # gsutil cp dir1/file1 gs://bucket\n # should create object gs://bucket/file1\n # Example of the second case:\n # gsutil cp dir1/dir2 gs://bucket\n # should create object gs://bucket/dir2/file2 (assuming dir1/dir2\n # contains file2).\n if src_uri.names_container():\n dst_path_start = (src_uri.object_name.rstrip(os.sep)\n .rpartition(os.sep)[-1])\n start_pos = exp_src_uri.object_name.find(dst_path_start)\n dst_key_name = exp_src_uri.object_name[start_pos:]\n else:\n # src is a file or object, so use final component of src name.\n dst_key_name = os.path.basename(exp_src_uri.object_name)\n if base_dst_uri.is_file_uri():\n # dst names a directory, so append src obj name to dst obj name.\n dst_key_name = '%s%s%s' % (base_dst_uri.object_name, os.sep,\n dst_key_name)\n self.CheckForDirFileConflict(exp_src_uri, dst_key_name)\n else:\n # dest is an object or file: use dst obj name\n dst_key_name = base_dst_uri.object_name\n return base_dst_uri.clone_replace_name(dst_key_name)", "def is_connected(src, dst):\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0", "def test_equals_with_different_sources(self):\n measurement_1 = Measurement(self.metric(), sources=[{\"source_uuid\": SOURCE_ID}])\n measurement_2 = Measurement(self.metric())\n self.assertFalse(measurement_1.equals(measurement_2))", "def check(self, src, dst, map=True):\n if map:\n map = mapping.create_mapping(src)\n else:\n map = src\n if map != dst:\n dumper.dumpDoc(map)\n print \"---- vs ----\"\n dumper.dumpDoc(dst)\n self.assertEqual(map, dst)\n self.assertEqual(dst, map) # do the vice versa test too\n return map", "def __eq__(self, other):\n return self.storage_ip == other.storage_ip and self.client_port == other.client_port and self.command_port == other.command_port and self.files == other.files", "def is_duplicate(self, event):\n # only checking remote and expected remote for the endpoint. We don't care about names,\n # interfaces/tunnels, or pctags for dup stale suppression\n return (self.remote == event.remote and self.expected_remote == event.expected_remote)", "def to_move(src: str, dst: str) -> bool:\n\n if not islink(src):\n with suppress(Exception):\n move(src, dst)\n return True\n return False", "def ErrorCheckCopyRequest(self, src_uri_expansion, dst_uri_str, headers,\n debug, command='cp'):\n for src_uri in src_uri_expansion:\n if src_uri.is_cloud_uri() and not src_uri.bucket_name:\n raise CommandException('Provider-only src_uri (%s)')\n\n if ContainsWildcard(dst_uri_str):\n matches = list(self.CmdWildcardIterator(dst_uri_str, headers=headers,\n debug=debug))\n if len(matches) > 1:\n raise CommandException('Destination (%s) matches more than 1 URI' %\n dst_uri_str)\n base_dst_uri = matches[0]\n else:\n base_dst_uri = self.StorageUri(dst_uri_str, debug=debug)\n\n # Make sure entire expansion didn't result in nothing to copy. This can\n # happen if user request copying a directory w/o -r option, for example.\n have_work = False\n for v in src_uri_expansion.values():\n if v:\n have_work = True\n break\n if not have_work:\n raise CommandException('Nothing to copy')\n\n # If multi-object copy request ensure base_dst_uri names a container.\n multi_src_request = (len(src_uri_expansion) > 1 or\n len(src_uri_expansion.values()[0]) > 1)\n if multi_src_request:\n self.InsistUriNamesContainer(command, base_dst_uri)\n\n # Ensure no src/dest pairs would overwrite src. 
Note that this is\n # more restrictive than the UNIX 'cp' command (which would, for example,\n # allow \"mv * dir\" and just skip the implied mv dir dir). We disallow such\n # partial completion operations in cloud copies because they are risky.\n for src_uri in iter(src_uri_expansion):\n for exp_src_uri in src_uri_expansion[src_uri]:\n new_dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri)\n if self.SrcDstSame(exp_src_uri, new_dst_uri):\n raise CommandException('cp: \"%s\" and \"%s\" are the same object - '\n 'abort.' % (exp_src_uri.uri, new_dst_uri.uri))\n\n return (base_dst_uri, multi_src_request)", "def is_duplicate_content_url(url1, url2):\n if url1 == url2:\n return True\n if url2 in url1:\n url1 = shorten_duplicate_content_url(url1)\n if not url2.endswith('/') and url1.endswith('/'):\n url2 += '/'\n return url1 == url2\n if url1 in url2:\n url2 = shorten_duplicate_content_url(url2)\n if not url1.endswith('/') and url2.endswith('/'):\n url1 += '/'\n return url1 == url2\n return False", "def __eq__(self, other):\n return self.doc_type == other.doc_type and \\\n self.src == other.src and \\\n self.name == other.name", "def copy_tree_checker(src, dst):\n copy_tree(src, dst)\n return True", "def CheckForDirFileConflict(self, src_uri, dst_path):\n final_dir = os.path.dirname(dst_path)\n if os.path.isfile(final_dir):\n raise CommandException('Cannot retrieve %s because it a file exists '\n 'where a directory needs to be created (%s).' %\n (src_uri, final_dir))\n if os.path.isdir(dst_path):\n raise CommandException('Cannot retrieve %s because a directory exists '\n '(%s) where the file needs to be created.' %\n (src_uri, dst_path))", "def _is_equal_same_type(self, other):\n # id\n self_id = self.id\n other_id = other.id\n if (self_id and other_id) and (self_id != other_id):\n return False\n \n # bot\n if self.bot != other.bot:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def __eq__(self, other):\n if not isinstance(other, AwsS3PresignedUrlForUpload):\n return False\n\n return self.to_dict() == other.to_dict()", "def assertObjEquals(self, rhs, lhs, is_saved=False):\n\n if isinstance(lhs, list) and isinstance(rhs, list):\n self.assertEqual(len(lhs), len(rhs))\n for l_folder in lhs:\n r_folder = next(n for n in rhs if n[\"name\"] == l_folder[\"name\"])\n self.assertObjEquals(l_folder, r_folder)\n return\n\n self.assertEqual(rhs[\"type\"], lhs[\"type\"])\n self.assertEqual(rhs[\"name\"], lhs[\"name\"])\n if lhs[\"type\"] == \"folder\":\n if is_saved:\n self.assertDictEqual(rhs['mapping'], lhs['mapping'])\n self.assertEqual(len(lhs[\"children\"]), len(rhs[\"children\"]))\n for child in lhs[\"children\"]:\n r_child = next(c for c in rhs[\"children\"] if c[\"name\"] == child[\"name\"])\n self.assertObjEquals(child, r_child, is_saved)\n else:\n if \"filename\" in rhs or \"filename\" in lhs:\n try:\n self.assertEqual(rhs[\"download_link\"], lhs[\"download_link\"])\n self.assertEqual(rhs[\"filename\"], lhs[\"filename\"])\n except Exception:\n import pdb; pdb.set_trace() # DEBUG\n self.assertEqual(rhs[\"predownload_link\"], lhs[\"predownload_link\"])", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or 
other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def __eq__(self, other):\n if not isinstance(other, UploadSnap):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other: 'Resource') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.storage_ip == other.storage_ip and self.client_port == other.client_port", "def is_same_object(self, mu, env, ref1, ref2):\n logger.debug(\"JNIEnv->IsSameObject(%d, %d) was called\" % (ref1, ref2))\n\n if ref1 == 0 and ref2 == 0:\n return JNI_TRUE\n\n obj1 = self.get_reference(ref1)\n obj2 = self.get_reference(ref2)\n\n if obj1 is obj2:\n return JNI_TRUE\n\n return JNI_FALSE", "def __ne__(self, other):\n if not isinstance(other, AwsS3PresignedUrlForUpload):\n return True\n\n return self.to_dict() != other.to_dict()" ]
[ "0.6530111", "0.63638157", "0.6347976", "0.6287368", "0.5945011", "0.59095615", "0.5890288", "0.5871432", "0.58332175", "0.58163655", "0.58120084", "0.57903457", "0.5762235", "0.571436", "0.5659043", "0.5610552", "0.5602035", "0.5590678", "0.5589833", "0.55659765", "0.55647314", "0.5494561", "0.54942566", "0.54902667", "0.5479612", "0.5472489", "0.5472393", "0.5471464", "0.54631054", "0.5459392" ]
0.8208193
0
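A minimal, illustrative sketch of the file-URI branch of the SrcDstSame check above (same_file_path and its sep argument are made-up names, not gsutil code): the two regex substitutions simply collapse './' path segments before comparing, much as posixpath.normpath does for these cases.

import posixpath
import re

def same_file_path(src_path, dst_path, sep='/'):
    # Collapse '<sep>.<sep>' segments and strip a leading '.<sep>', mirroring the regexes above.
    def norm(p):
        p = re.sub('%s+\\.%s+' % (re.escape(sep), re.escape(sep)), sep, p)
        return re.sub('^\\.%s+' % re.escape(sep), '', p)
    return norm(src_path) == norm(dst_path)

assert same_file_path('a/b/./c', 'a/b/c')
assert same_file_path('./a/b', 'a/b')
assert posixpath.normpath('a/b/./c') == 'a/b/c'   # equivalent normalization for this case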
Constructs a destination URI for CopyObjsCommand.
def ConstructDstUri(self, src_uri, exp_src_uri, base_dst_uri): if base_dst_uri.names_container(): # To match naming semantics of UNIX 'cp' command, copying files # to buckets/dirs should result in objects/files named by just the # final filename component; while copying directories should result # in objects/files mirroring the directory hierarchy. Example of the # first case: # gsutil cp dir1/file1 gs://bucket # should create object gs://bucket/file1 # Example of the second case: # gsutil cp dir1/dir2 gs://bucket # should create object gs://bucket/dir2/file2 (assuming dir1/dir2 # contains file2). if src_uri.names_container(): dst_path_start = (src_uri.object_name.rstrip(os.sep) .rpartition(os.sep)[-1]) start_pos = exp_src_uri.object_name.find(dst_path_start) dst_key_name = exp_src_uri.object_name[start_pos:] else: # src is a file or object, so use final component of src name. dst_key_name = os.path.basename(exp_src_uri.object_name) if base_dst_uri.is_file_uri(): # dst names a directory, so append src obj name to dst obj name. dst_key_name = '%s%s%s' % (base_dst_uri.object_name, os.sep, dst_key_name) self.CheckForDirFileConflict(exp_src_uri, dst_key_name) else: # dest is an object or file: use dst obj name dst_key_name = base_dst_uri.object_name return base_dst_uri.clone_replace_name(dst_key_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def UriStrFor(iterated_uri, obj):\n return '%s://%s/%s' % (iterated_uri.scheme, obj.bucket.name, obj.name)", "def build_target_uri(self, **kwargs):\n return self._build_uri(**kwargs)", "def destination(self) -> str:\n for a in self.args:\n if a[0] != '-':\n return a\n try:\n return self.kwargs['dest']\n except KeyError:\n for a in self.args:\n if a.startswith('--'):\n dest = a.lstrip('-').replace('-', '_')\n if dest.isidentifier():\n return dest\n raise AttributeError(F'The argument with these values has no destination: {self!r}')", "def _uri_realm_creator(self, endpoint=\"json\", realm=None, uri=None, arguments=None):\n if realm is not None:\n uri = endpoint + '/' + realm + '/' + uri\n else:\n uri = endpoint + '/' + uri\n\n if arguments is not None:\n uri += arguments\n\n return uri", "def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' + urlencode(query)\n\n return url", "def _make_url(self):\n ...", "def CopyObjsCommand(self, args, sub_opts=None, headers=None, debug=0,\n command='cp'):\n # Expand wildcards and containers in source StorageUris.\n src_uri_expansion = self.ExpandWildcardsAndContainers(\n args[0:len(args)-1], sub_opts, headers, debug)\n\n # Check for various problems and determine base_dst_uri based for request.\n (base_dst_uri, multi_src_request) = self.ErrorCheckCopyRequest(\n src_uri_expansion, args[-1], headers, debug, command)\n # Rewrite base_dst_uri and create dest dir as needed for multi-source copy.\n if multi_src_request:\n base_dst_uri = self.HandleMultiSrcCopyRequst(src_uri_expansion,\n base_dst_uri)\n\n # Now iterate over expanded src URIs, and perform copy operations.\n total_elapsed_time = total_bytes_transferred = 0\n for src_uri in iter(src_uri_expansion):\n for exp_src_uri in src_uri_expansion[src_uri]:\n print 'Copying %s...' 
% exp_src_uri\n dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri)\n (elapsed_time, bytes_transferred) = self.PerformCopy(\n exp_src_uri, dst_uri, sub_opts, headers, debug)\n total_elapsed_time += elapsed_time\n total_bytes_transferred += bytes_transferred\n if debug == 3:\n # Note that this only counts the actual GET and PUT bytes for the copy\n # - not any transfers for doing wildcard expansion, the initial HEAD\n # request boto performs when doing a bucket.get_key() operation, etc.\n if total_bytes_transferred != 0:\n print 'Total bytes copied=%d, total elapsed time=%5.3f secs (%sps)' % (\n total_bytes_transferred, total_elapsed_time,\n MakeHumanReadable(float(total_bytes_transferred) /\n float(total_elapsed_time)))", "def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)", "def dest(self):\n if self._dest:\n return self._dest\n\n # Parse and create a new client\n conn = parse_url(self.dest_url)\n client = get_client(conn)\n self._dest = client\n return self._dest", "def dest(self):\n if self._dest:\n return self._dest\n\n # Parse and create a new client\n conn = parse_url(self.dest_url)\n client = get_client(conn)\n self._dest = client\n return self._dest", "def __url(self, object):\n return '/'.join(object.getPhysicalPath())", "def HandleMultiSrcCopyRequst(self, src_uri_expansion, dst_uri):\n # If src_uri and dst_uri both name containers, handle\n # two cases to make copy command work like UNIX \"cp -r\" works:\n # a) if dst_uri names a non-existent directory, copy objects to a new\n # directory with the dst_uri name. In this case,\n # gsutil gs://bucket/a dir\n # should create dir/a.\n # b) if dst_uri names an existing directory, copy objects under that\n # directory. 
In this case,\n # gsutil gs://bucket/a dir\n # should create dir/bucket/a.\n src_uri_to_check = src_uri_expansion.keys()[0]\n if (src_uri_to_check.names_container() and dst_uri.names_container() and\n os.path.exists(dst_uri.object_name)):\n new_name = ('%s%s%s' % (dst_uri.object_name, os.sep,\n src_uri_to_check.bucket_name)).rstrip('/')\n dst_uri = dst_uri.clone_replace_name(new_name)\n # Create dest directory if needed.\n if dst_uri.is_file_uri() and not os.path.exists(dst_uri.object_name):\n os.makedirs(dst_uri.object_name)\n return dst_uri", "def prepare_resource_uri(self, object):\n return '/api/v1/actor/{0}/'.format(object.id)", "def get_create_url(self, resource_obj=None, **kwargs):\n\n return self._generate_url(\n url_type='create', resource_obj=resource_obj, **kwargs\n )", "def get_obj_uri(self, ports=None, initiators=None):\n ports_uri = []\n initiators_uri = []\n\n if ports:\n ports_uri = [\"/vplex/v2/clusters/{0}/exports/ports/{1}\".format(\n self.cl_name, port) for port in ports]\n if initiators:\n uri = \"/vplex/v2/clusters/{}/exports/initiator_ports/{}\"\n initiators_uri = [uri.format(\n self.cl_name, initiator) for initiator in initiators]\n return (ports_uri, initiators_uri)", "def _build_uri(self, **kwargs):\n target_uri, version = str(), None\n\n if kwargs.get('category') not in ['performance', 'common']:\n version = self._build_uri_get_version(kwargs.get('version'),\n kwargs.get('no_version'))\n if version:\n target_uri += '/{version}'.format(version=version)\n\n target_uri += '/{category}'.format(\n category=kwargs.get('category'))\n\n if kwargs.get('resource_level'):\n target_uri += '/{resource_level}'.format(\n resource_level=kwargs.get('resource_level'))\n\n if kwargs.get('resource_level_id'):\n target_uri += '/{resource_level_id}'.format(\n resource_level_id=kwargs.get('resource_level_id'))\n\n if kwargs.get('resource_type'):\n target_uri += '/{resource_type}'.format(\n resource_type=kwargs.get('resource_type'))\n if kwargs.get('resource_type_id'):\n target_uri += '/{resource_type_id}'.format(\n resource_type_id=kwargs.get('resource_type_id'))\n\n if kwargs.get('resource'):\n target_uri += '/{resource}'.format(\n resource=kwargs.get('resource'))\n if kwargs.get('resource_id'):\n target_uri += '/{resource_id}'.format(\n resource_id=kwargs.get('resource_id'))\n\n if kwargs.get('object_type'):\n target_uri += '/{object_type}'.format(\n object_type=kwargs.get('object_type'))\n if kwargs.get('object_type_id'):\n target_uri += '/{object_type_id}'.format(\n object_type_id=kwargs.get('object_type_id'))\n\n return target_uri", "def join(cls, *args):\n return AbsolutePath(os.path.join(*(str(piece) for piece in args)))", "def construct_url(self, local_json: Dict) -> str:\n url_str = \"\"\n\n for arg in self.get_url_args():\n if arg == \"merchantId\":\n url_str = url_str + str(self.merchant_id) + \"/\"\n elif arg == \"signature\":\n url_str = url_str + str(self.get_url_signature(local_json)) + \"/\"\n else:\n url_str = url_str + str(local_json[arg]) + \"/\"\n\n return urljoin(self.get_url(), url_str[:-1])", "def createURI():\n ret = libxml2mod.xmlCreateURI()\n if ret is None:raise uriError('xmlCreateURI() failed')\n return URI(_obj=ret)", "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = 
''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def _build_url(self, tail_end):\n url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end\n return url", "def _build_url(self, tail_end):\n url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end\n return url", "def create_url(self):\n self.base_url = self.base + self.strs[jpn.path_latest]", "def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))", "def destination(self) -> pulumi.Input['DestinationArgs']:\n return pulumi.get(self, \"destination\")", "def do_destination(self, args):\n self.destination = int(args)", "def __init__(self, origin, destination):\n self.origin = origin\n self.destination = destination" ]
[ "0.5702773", "0.56189567", "0.5521624", "0.550445", "0.54854167", "0.546202", "0.5313209", "0.5293089", "0.5288539", "0.52817786", "0.5187029", "0.5181945", "0.5181945", "0.5154946", "0.51415575", "0.51282495", "0.50870293", "0.505311", "0.50460774", "0.5034968", "0.5026224", "0.50068337", "0.49969998", "0.49916297", "0.49916297", "0.49646583", "0.49643865", "0.49269548", "0.49250975", "0.49127522" ]
0.60834557
0
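The destination-naming rule in ConstructDstUri above mirrors UNIX cp: copying a file into a container keeps only the final filename component, while copying a directory preserves the hierarchy from the directory's own name down. A rough standalone sketch of that rule (dst_key_for and its parameters are illustrative names, not gsutil code):

import os

def dst_key_for(src, expanded_src, src_is_container):
    # src_is_container: True when src names a directory/prefix (e.g. 'dir1/dir2').
    if src_is_container:
        last = src.rstrip('/').rpartition('/')[-1]      # e.g. 'dir2'
        return expanded_src[expanded_src.find(last):]   # e.g. 'dir2/file2'
    return os.path.basename(expanded_src)               # e.g. 'file1'

# The two cases from the docstring above:
assert dst_key_for('dir1/file1', 'dir1/file1', False) == 'file1'
assert dst_key_for('dir1/dir2', 'dir1/dir2/file2', True) == 'dir2/file2'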
Print listing info for given bucket.
def PrintBucketInfo(self, bucket_uri, listing_style, headers=None, debug=0): bucket_objs = 0 bucket_bytes = 0 if listing_style == ListingStyle.SHORT: print bucket_uri else: try: for obj in self.CmdWildcardIterator( bucket_uri.clone_replace_name('*'), ResultType.KEYS, headers=headers, debug=debug): bucket_objs += 1 bucket_bytes += obj.size except WildcardException, e: # Ignore non-matching wildcards, to allow empty bucket listings. if e.reason.find('No matches') == -1: raise e if listing_style == ListingStyle.LONG: print '%s : %s objects, %s' % ( bucket_uri, bucket_objs, MakeHumanReadable(bucket_bytes)) else: # listing_style == ListingStyle.LONG_LONG: print '%s :\n\t%s objects, %s\n\tACL: %s' % ( bucket_uri, bucket_objs, MakeHumanReadable(bucket_bytes), bucket_uri.get_acl(False, headers)) return (bucket_objs, bucket_bytes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_bucket(self, bucket):\n\n self.response.write(\"Listbucket result:\\n\")\n\n # Production apps should set page_size to a practical value.\n page_size = 1\n stats = cloudstorage.listbucket(bucket + \"/foo\", max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write(\"\\n\")\n\n if count != page_size or count == 0:\n break\n stats = cloudstorage.listbucket(\n bucket + \"/foo\", max_keys=page_size, marker=stat.filename\n )", "def list_bucket(self, bucket):\n self.response.write('Listbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,\n marker=stat.filename)", "def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):\n uri = bucket_listing_ref.GetUri()\n obj = bucket_listing_ref.GetKey()\n uri_str = UriStrForObj(uri, obj, self.all_versions)\n\n if isinstance(obj, DeleteMarker):\n size_string = '0'\n numobjs = 0\n numbytes = 0\n else:\n size_string = (MakeHumanReadable(obj.size)\n if self.human_readable else str(obj.size))\n numobjs = 1\n numbytes = obj.size\n\n if not self.summary_only:\n sys.stdout.write('%(size)-10s %(uri)s%(ending)s' % {\n 'size': size_string,\n 'uri': uri_str.encode('utf-8'),\n 'ending': self.line_ending})\n\n return numobjs, numbytes", "def list_bucket(self, bucket):\n self.response.write('Creating more files for listbucket...\\n')\n self.create_file(bucket + '/foo1')\n self.create_file(bucket + '/foo2')\n self.response.write('\\nListbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket, max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n last_filename = stat.filename[len(bucket) + 1:]\n stats = gcs.listbucket(bucket, max_keys=page_size, marker=last_filename)", "def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):\n obj = bucket_listing_ref.root_object\n url_str = bucket_listing_ref.url_string\n if (obj.metadata and\n S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):\n size_string = '0'\n num_bytes = 0\n num_objs = 0\n url_str += '<DeleteMarker>'\n else:\n size_string = (MakeHumanReadable(obj.size)\n if self.human_readable else str(obj.size))\n num_bytes = obj.size\n num_objs = 1\n\n if not self.summary_only:\n url_detail = '{size:<11} {url}{ending}'.format(\n size=size_string,\n url=six.ensure_text(url_str),\n ending=six.ensure_text(self.line_ending))\n print_to_fd(url_detail, file=sys.stdout, end='')\n\n return (num_objs, num_bytes)", "def list_buckets():\n for bucket in BUCKET_MANAGER.all_buckets():\n print(bucket)", "def list_buckets():\n for bucket in s3.buckets.all():\n print(bucket)", "def list_bucket_objects(bucket):\n for obj in BUCKET_MANAGER.all_objects(bucket).all():\n print(obj)", "def bucket_info(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == args.group:\n for bucket in group[\"buckets\"]:\n if bucket[\"name\"] == 
args.bucket:\n print(json.dumps(bucket, indent=4))\n return 0\n break\n\n print(\"No bucket matching {} found\".format(args.bucket))\n return 1", "def listAllBuckets(self):\n print self.getAllBuckets()", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def list_buckets():\n response = s3.list_buckets()\n # Output the bucket names\n print('Existing buckets:')\n for bucket in response['Buckets']:\n print(bucket[\"Name\"])", "def list_bucket(self, bucket_id=None):\n url = self.prism_endpoint + \"/wBuckets\"\n\n if bucket_id is not None:\n url = url + \"/\" + bucket_id\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n r = requests.get(url, headers=headers)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained information about your buckets\")\n return r.json()\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def list(number=0):\n buckets = get_buckets(number)\n data = [(\"Name\", \"Versioned\", \"LifeCycle\")]\n col_width = [0, 0, 0]\n rows = []\n for name, bucket in buckets.items():\n v = bucket[\"versioning\"]\n l = bucket[\"lifecycle\"]\n v = v if v else \"Disabled\"\n if l:\n l = json.dumps(l, indent=1)\n else:\n l = \"None\"\n data.append((name, v, l))\n for row in data:\n for i, info in enumerate(row):\n col_width[i] = min(max(len(info) + 2, col_width[i]), 48)\n dashes = tuple((\"-\" * (width - 1) for width in col_width))\n data.insert(1, dashes)\n click.echo(f\"The status of the buckets:\")\n for row in data:\n output = \"\"\n for i in range(3):\n output += row[i].ljust(col_width[i])\n if not VERBOSE:\n click.echo(output)\n logger.info(output)", "def ListCommand(self, args, sub_opts=None, headers=None, debug=0):\n listing_style = ListingStyle.SHORT\n get_bucket_info = False\n if sub_opts:\n for o, unused_a in sub_opts:\n if o == '-b':\n get_bucket_info = True\n if o == '-l':\n listing_style = ListingStyle.LONG\n if o == '-L':\n listing_style = ListingStyle.LONG_LONG\n if not args:\n # default to listing all gs buckets\n args = ['gs://']\n\n total_objs = 0\n total_bytes = 0\n for uri_str in args:\n uri = self.StorageUri(uri_str, debug=debug, validate=False)\n\n if not uri.bucket_name:\n # Provider URI: add bucket wildcard to list buckets.\n for uri in self.CmdWildcardIterator('%s://*' % uri.scheme,\n headers=headers, debug=debug):\n (bucket_objs, bucket_bytes) = self.PrintBucketInfo(uri, listing_style,\n headers=headers,\n debug=debug)\n total_bytes += bucket_bytes\n total_objs += bucket_objs\n\n elif not uri.object_name:\n if get_bucket_info:\n # ls -b request on provider+bucket URI: List info about bucket(s).\n for uri in self.CmdWildcardIterator(uri, headers=headers,\n debug=debug):\n (bucket_objs, bucket_bytes) = self.PrintBucketInfo(uri,\n listing_style,\n headers=headers,\n debug=debug)\n total_bytes += bucket_bytes\n total_objs += bucket_objs\n else:\n # ls request on provider+bucket URI: List objects in the bucket(s).\n for obj in self.CmdWildcardIterator(uri.clone_replace_name('*'),\n ResultType.KEYS,\n headers=headers, debug=debug):\n total_bytes += self.PrintObjectInfo(uri, obj, listing_style,\n headers=headers, debug=debug)\n total_objs += 1\n\n else:\n # Provider+bucket+object URI -> list the object(s).\n for obj in self.CmdWildcardIterator(uri, ResultType.KEYS,\n headers=headers, debug=debug):\n total_bytes += self.PrintObjectInfo(uri, obj, listing_style,\n headers=headers, debug=debug)\n total_objs += 
1\n if listing_style != ListingStyle.SHORT:\n print ('TOTAL: %d objects, %d bytes (%s)' %\n (total_objs, total_bytes, MakeHumanReadable(float(total_bytes))))", "def list_buckets():\n pass", "def list_all_buckets(riak_host,riak_port):\n url='http://%s:%s/buckets?buckets=true' % (riak_host,riak_port)\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def display_book(self):\n print(\"List of books available is: \")\n for book in books_list :\n print(\"- \",book)", "def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):\r\n more_results = True\r\n k = None\r\n while more_results:\r\n rs = bucket.get_all_keys(prefix=prefix, marker=marker,\r\n delimiter=delimiter, headers=headers)\r\n for k in rs:\r\n yield k\r\n if k:\r\n marker = k.name\r\n more_results= rs.is_truncated", "def get_bucketlist():\n pass", "def list(aMap):\n\t#iterates through every bucket in aMap\n\tfor bucket in aMap:\n\t\t#if the bucket contains information...\n\t\tif bucket:\n\t\t\t#unpacks the key/value and prints\n\t\t\tfor k, v in bucket:\n\t\t\t\tprint k, v", "def __repr__(self):\n return '<Bucketlist {0} : {1}>'.format(self.bucketlist_id, self.name)", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def list_buckets(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n print(group[\"name\"] + \":\")\n for bucket in group[\"buckets\"]:\n print(\"\\t\" + bucket[\"name\"])", "def list_all_keys(riak_host,riak_port,bucket):\n url='http://%s:%s/buckets/%s/keys?keys=true' % (riak_host,riak_port,bucket)\n #print url\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def bucket_info(request):\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n return JsonResponse(request.session[\"analytics\"].bucket_info())", "def describe_objects(bucket_name):\n # Upload the file\n try:\n # Retrieve list of files in bucket\n response = s3.list_objects_v2(Bucket=bucket_name)\n files = response.get(\"Contents\")\n # Output file names\n for file in files:\n print(f\"file_name: {file['Key']}, size: {file['Size']}\")\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def __str__(self):\n result = \"\"\n for i in range(len(self.__buckets)):\n result += \"Bucket \" + str(i) + \": \" + str(self.__buckets[i]) + \"\\n\"\n return result" ]
[ "0.7354716", "0.7352198", "0.71958464", "0.71349263", "0.7099918", "0.68598676", "0.6761879", "0.64579564", "0.63793075", "0.63526654", "0.6350501", "0.62969315", "0.62795097", "0.6077442", "0.5934571", "0.5862507", "0.58489895", "0.5740662", "0.5668863", "0.56325465", "0.5594633", "0.55939823", "0.5589235", "0.5580994", "0.5576541", "0.5568101", "0.5562127", "0.55250263", "0.55250263", "0.5523841" ]
0.78802145
0
This func return data for top countries throughout time.
def topCountries(top=10): #top 10 deadly countries countries = agg('country')[:top].index #grab aggregated data for these countries dataOfTop10 = agg(['year','country']).query("country in @countries")### interesting... #unstack data dataOfTop10 = dataOfTop10.unstack(1) #remove multiindexes dataOfTop10 = dataOfTop10.transpose().reset_index(level=0, drop=True).transpose() #sort by year dataOfTop10.sort_index(inplace=True) return dataOfTop10
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_top_country(self):\n tabular_format_countries_list = [['Canada', 66, '20'], ['United States', 33, '10']]\n\n result = InstallationStatistics.get_statistics_top_country(tabular_format_countries_list)\n\n self.assertEqual('Canada', result)", "def topBrandsandCountries(df, countries_unique):\n top_countries = {}\n for x in countries_unique:\n if df[df.geo_country==x].device_brand_name.count() > 500:\n top_countries[x] = df[df.geo_country==x].device_brand_name.count()\n\n top_3_brands = ['Apple','Samsung','Huawei']\n\n apple = []\n samsung = []\n huawei = []\n for x in top_countries.keys():\n apple.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[0]].device_brand_name.count())\n samsung.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[1]].device_brand_name.count())\n huawei.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[2]].device_brand_name.count()) \n\n return top_countries,apple,samsung,huawei", "def get_top_five_countries():\n countries=country_populations.split('\\n')\n top_5=[]\n count=0\n for country in countries:\n if count<6:\n data= country.split('\\t')\n top_5.append(data[1])\n count+=1\n top_5.remove('Country')\n return top_5", "def temperatures_by_country_till2013():\r\n\r\n # Columns: dt, AverageTemperature, AverageTemperatureUncertainty, Country\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv\")\r\n\r\n # 577 462 rows\r\n print(len(temperatures))\r\n\r\n countries = temperatures['Country'].unique()\r\n print(len(countries))\r\n print(sorted(countries))", "def __top_prod_countries(df, number_of_countries):\n\t\tmlb = MultiLabelBinarizer()\n\t\ttemp = pd.DataFrame(mlb.fit_transform(df['production_countries']), columns=mlb.classes_, index=df.index)\n\t\treturn temp.sum().sort_values(ascending=False)[:number_of_countries].index.values", "def get_top_10(df):\n\n grouped_df = df.groupby(\"country\").max()\n\n # Confirmed cases\n print(grouped_df.sort_values(\"confirmed\",\n ascending=False)[\"confirmed\"][:10])\n\n # Deaths\n print(grouped_df.sort_values(\"deaths\", ascending=False)[\"deaths\"][:10])\n\n # Recoveries\n print(grouped_df.sort_values(\"recovered\",\n ascending=False)[\"recovered\"][:10])\n\n a = grouped_df.sort_values(\"recovered\", ascending=False)[\"recovered\"][:10]\n print(a.to_markdown())", "def summary_table(countries: List[str]):\n \n df_list = []\n \n for country in countries:\n acceleration_figures = acceleration(country)\n pop = COUNTRY_DATA[country]['population']\n df_list.append(\n [\n country,\n COUNTRY_DATA[country]['data'].confirmed[-1],\n int(acceleration_figures[0] * pop),\n COUNTRY_DATA[country]['data'].deaths[-1],\n int(acceleration_figures[1] * pop),\n ]\n )\n\n return df_list", "def get_top_nationalities(result, n=5):\n nat_freq=pd.DataFrame(result['country'].value_counts())\n ratios=nat_freq[:n]/nat_freq.sum()*100\n res='The most common visitors are from'\n for i in range(0,len(ratios)):\n if i!=len(ratios)-1:\n res=res+f' {ratios.index[i]} ({np.round(ratios.country[i],2)}%),'\n else:\n res=res+f' and {ratios.index[i]} ({np.round(ratios.country[i],2)}%).'\n return res", "def countries():\r\n\r\n # Use Pandas to perform the sql query\r\n results = db.session.query(Worldmapdata.to_country.distinct().label(\"to_country\"))\r\n country = [row.to_country for row in results.all()]\r\n # Return a list of the column names (sample names)\r\n return jsonify(list(country))", "def top_groups():\n groups = 
Group.objects.filter(country='PT').order_by('-members')[:10]\n df = pd.DataFrame.from_records(groups.values())\n return df", "def temperatures_by_city_till2013():\r\n\r\n # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv\")\r\n\r\n # 8 599 212 rows\r\n print(len(temperatures))\r\n\r\n countries = temperatures['Country'].unique()\r\n print(len(countries))\r\n print(sorted(countries))", "def AllCountries():\n print(\"TOUS LES PAYS\\n\")\n for x in countries:\n nom = countries[x]['name']\n capital = countries[x]['capital']\n continent = countries[x]['location']\n independance = countries[x]['independance']\n president = countries[x]['president']\n langue = countries[x]['langue']\n superficie = countries[x]['superficie']\n haditant = countries[x]['population']\n pib = countries[x]['pib']\n\n print(f\"Nom ==> {nom}\")\n print(f\"Capital ==> {capital}\")\n print(f\"Continent ==> {continent}\")\n print(f\"Date Independance ==> {independance}\")\n print(f\"Nom President Actuel ==> {president}\")\n print(f\"Langue Offielle ==> {langue}\")\n print(f\"Superficie ==> {superficie}\")\n print(f\"Nombre d'habitants ==> {haditant}\")\n print(f\"PIB ==> {pib}\")", "def country(request):\n class Results(object):\n\n def __init__(self, cc):\n self.cc = cc\n self.registered = 0\n self.dns = 0\n self.dnf = 0\n \n def add_rider(self, rider):\n self.registered += 1\n\n if rider.dns:\n self.dns += 1\n\n if rider.dnf:\n self.dnf += 1\n\n def finish_rate(self):\n \n rate = 100*(self.registered-self.dns-self.dnf)/(self.registered-self.dns)\n return rate\n\n results = {}\n for rider in models.Rider.objects.all():\n cc = rider.country.code\n results[cc] = results.get(cc, Results(cc))\n results[cc].add_rider(rider)\n\n results = results.values()\n sort = request.GET.get('sort', 'country')\n\n if sort == \"country\":\n results.sort(key=lambda x: x.cc)\n elif sort == \"registered\":\n results.sort(key=lambda x: x.registered, reverse=True)\n elif sort == \"rate\":\n results.sort(key=lambda x: x.registered, reverse=True)\n results.sort(key=lambda x: x.finish_rate(), reverse=True)\n\n total_registered = sum([r.registered for r in results])\n total_dns = sum([r.dns for r in results])\n total_dnf = sum([r.dnf for r in results])\n overall_finish_rate = 100 * (total_registered-total_dns-total_dnf)/(total_registered-total_dns)\n\n template = env.get_template(\"country.html\")\n rendered = template.render(dict(results=results,\n country_names=countries.OFFICIAL_COUNTRIES,\n registered=total_registered,\n total_dns=total_dns,\n total_dnf=total_dnf,\n overall_finish_rate=overall_finish_rate,\n ))\n\n return HttpResponse(rendered)", "def plot_country_representation():\n\n # Get all player data, drops duplicates\n all_players = players.copy().drop_duplicates(subset=\"name\", keep=\"first\")\n # Groupy origin, count unique names (unique since there are no duplicates)\n all_players = all_players.groupby(\"origin\")[\"name\"].count()\n # Push name and origin into columns\n all_players = pd.DataFrame(all_players.reset_index())\n\n # Get all top30 player data, drop duplicates\n top30_players = current_lineups.drop_duplicates(\n subset=\"name\", keep=\"first\")\n # Groupy origin, count unique names (unique since there are no duplicates)\n top30_players = top30_players.groupby(\"origin\")[\"name\"].count()\n # Push name and origin into columns\n top30_players = pd.DataFrame(top30_players.reset_index())\n\n # 
Get all player data\n majors = players.copy()\n # Filter so only players that have attended Major Tournaments are present\n majors = majors[majors[\"tournament\"].isin(large_tourneys)]\n # Drop duplicates\n majors = majors.drop_duplicates(subset=\"name\", keep=\"first\")\n # Groupby origin, count names\n majors = majors.groupby(\"origin\")[\"name\"].count()\n # Add name and origin back to columns\n majors = pd.DataFrame(majors.reset_index())\n\n # Sort values by count of player\n all_players = all_players.sort_values(by=\"name\", ascending=False)\n top30_players = top30_players.sort_values(by=\"name\", ascending=False)\n majors = majors.sort_values(by=\"name\", ascending=False)\n\n # Renaming columns to better describe data\n top30_players = top30_players.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n all_players = all_players.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n majors = majors.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n\n return top30_players", "def getTopPopulationRegion(self):\n\t\tdata = {}\n\t\tfor iProvince in range(con.iNumRegions):\n\t\t\tdata[iProvince] = 0\n\t\tfor iLoopPlayer in range(con.iBarbarian + 1):\n\t\t\tapCityList = PyPlayer(iLoopPlayer).getCityList()\n\t\t\tfor pCity in apCityList:\n\t\t\t\tdata[pCity.GetCy().plot().getRegionID()] += pCity.getPopulation()\n\t\tkey = -1\n\t\tfor key, value in sorted(data.iteritems(), key=lambda (k,v): (v,k)):\n\t\t\tpass\n\t\treturn key", "def get_country_data():\n\n parser = argparse.ArgumentParser(\n description='Retrieve aggregated stats by aggregation type, metric, and region.',\n )\n parser.add_argument(\n '--aggregation',\n required=True,\n choices=[\n 'avg',\n 'count',\n 'max',\n 'min',\n 'sum',\n ],\n help='Aggregation type',\n )\n parser.add_argument(\n '--field',\n required=True,\n choices=[\n 'area',\n 'borders',\n 'countries',\n 'currencies',\n 'gini',\n 'languages',\n 'latlng',\n 'population',\n ],\n help='Metric to aggregate',\n )\n parser.add_argument(\n '--by',\n required=True,\n choices=[\n 'region',\n 'subregion',\n ],\n help='Field to group aggregates by',\n )\n\n args = parser.parse_args()\n params = {\n 'aggregation': args.aggregation,\n 'field': args.field,\n 'by': args.by,\n }\n return process_aggregation_request(params)", "def sorted_countries():\n ahh = [(country, COUNTRY_DATA[country]['data'].deaths[-1]) for country in COUNTRY_DATA.keys()]\n sorted_countries = sorted(ahh, key=lambda x: x[1], reverse=True)\n return [data[0] for data in sorted_countries]", "def fromCountry():\r\n query = db.session.query(Eurovision.from_country.distinct().label(\"countries\"))\r\n countries = [row.countries for row in query.all()]\r\n # Return a list of the column names (sample names)\r\n return jsonify(list(countries))", "def high_income_countries():\r\n high_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in high_countries:\r\n high_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in high_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def test_top_country(self):\n max_students_country_alpha_3_label = 'Top Country by Enrollment'\n\n target_html_object = html_target.activity_metric_with_id('top_country').format(\n '', max_students_country_alpha_3_label\n 
)\n\n self.assertContains(self.response, target_html_object, 1)", "def get_countries():\n call = build_call('attr', 'country')\n return request_data(call)", "def consolidate_country_data(stream): \n country_data = {}\n country_traffic_size = get_country_to_traffic_size(stream)\n country_packet_count = get_country_to_packet_count(stream)\n\n for country in country_traffic_size:\n data = {}\n data[PACKET_COUNT] = country_packet_count[country]\n data[TRAFFIC_SIZE] = country_traffic_size[country]\n country_data[country] = data\n\n return country_data", "def sort_top_10(data: List[EmissionPerCapita], current_year: int) -> List[list]:\r\n top_10 = get_top_10(data, current_year) # Call get_top_10.\r\n index = current_year - top_10[0].start_year # Get the index for current_year.\r\n values = []\r\n countries = []\r\n\r\n # Get the value of emission per person of each element, and sort the values.\r\n for emission in top_10:\r\n values.append(emission.epc_year[index])\r\n\r\n list.sort(values)\r\n\r\n # use the sorted values to get corresponding country names in the same order.\r\n for value in values:\r\n for country in data:\r\n if country.epc_year[index] == value:\r\n countries.append(country.name)\r\n\r\n return [countries, values]", "def _get_daily_countries_data(date, country, region):\n # date[0] = DAY(DD), date[1] = MONTH(MM)\n file_name = \"{}-{}-2020.csv\".format(date[1], date[0])\n data_dir = os.path.join(\"country_data\",\n \"{}_monthly_{}\".format(country, date[1]))\n if not os.path.isdir(data_dir):\n os.makedirs(data_dir)\n fullpath_file = os.path.join(data_dir, file_name)\n if not os.path.isfile(fullpath_file):\n url = os.path.join(JOHN_HOPKINS, file_name)\n urllib.urlretrieve(url, fullpath_file)\n\n # file reading\n with open(fullpath_file, \"r\") as csv_file:\n reader = csv.reader(csv_file, delimiter=',', quotechar='\"')\n data_read = [row for row in reader]\n\n # parse for both older JHU data format and newer\n if date[1] == '03' and int(float(date[0])) < 23:\n # geography index in file\n cidx = 1\n if region:\n cidx = 0\n idx_pack = [2, 3, 4, 5]\n (exp_dates,\n count_cases,\n count_deaths,\n count_rec) = _get_extracted(data_read, country, idx_pack, cidx)\n country_data = ',' + ','.join([country, exp_dates,\n str(count_cases),\n str(count_deaths), str(count_rec)])\n if region:\n country_data = ','.join([country, \"REGION\",exp_dates,\n str(count_cases),\n str(count_deaths), str(count_rec)])\n else:\n # geography index in file\n cidx = 3\n if region:\n cidx = 2\n idx_pack = [4, 7, 8, 9]\n (exp_dates,\n count_cases,\n count_deaths,\n count_rec) = _get_extracted(data_read, country, idx_pack, cidx)\n country_data = ',,,' + ','.join([country, exp_dates, \"c1\", \"c2\",\n str(count_cases),\n str(count_deaths), str(count_rec)])\n if region:\n country_data = ',,' + ','.join([country, \"REGION\",exp_dates,\n \"c1\", \"c2\",\n str(count_cases),\n str(count_deaths), str(count_rec)])\n csv_file.close()\n os.remove(fullpath_file)\n\n # overwrite so to optimize disk use\n with open(fullpath_file, \"w\") as file:\n file.write(country_data)\n\n return count_cases, count_deaths, count_rec, exp_dates", "def get_vaccine_stats(self):\n final_response = {}\n dataframe = self.query_api()\n filtered_dataframe = self.filter_state(dataframe, self.state)\n final_response[self.state] = self.get_stats(filtered_dataframe, self.state)\n filtered_dataframe = self.filter_state(dataframe, \"India\")\n final_response[\"India\"] = self.get_stats(filtered_dataframe, \"India\")\n return json.dumps(final_response, 
indent=2)", "def get(world, country, json):\n if world:\n dt = data.get_global_data()\n pretty_print(dt, json)\n if country:\n dt = data.get_country_data(country)\n pretty_print(dt, json)", "def covid_data_chart(request):\n\n print(request)\n labels = []\n data = []\n\n queryset = CovidData.objects.filter(country=\"Australia\").order_by(\"date\")\n for entry in queryset:\n labels.append(entry.date)\n data.append(entry.confirmed)\n\n return JsonResponse(\n data={\n \"country\": \"Australia Covid Cases\",\n \"labels\": labels,\n \"data\": data,\n }\n )", "def east_asia_pacific_countries():\r\n east_asia_pacific_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in east_asia_pacific:\r\n east_asia_pacific_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in east_asia_pacific_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def get_top_trends_from_twitter_api(country='Japan', exclude_hashtags=True):\n # this stupid WOEID requires yweather to get (a library), because YAHOO itself has stopped supporting it\n # WOEID\n woeid_client = yweather.Client()\n woeid = woeid_client.fetch_woeid(location=country)\n\n check_rate_limit()\n\n if exclude_hashtags :\n trends = api.GetTrendsWoeid(woeid, exclude='hashtags')\n else:\n trends = api.GetTrendsWoeid(woeid, exclude=None)\n\n output = []\n images_output = []\n for trend in trends:\n trend = trend.AsDict()\n\n # get volumes\n try:\n tw_volume = int(trend['tweet_volume']),\n except:\n tw_volume = [0]\n\n # match time with timezone\n timestamp_str = trend['timestamp'] # this is utc\n timestamp_dt = str_2_datetime(timestamp_str, input_format=time_format_twitter_trends).replace(tzinfo=pytz.utc)\n\n # timestamp_local = timestamp_dt.astimezone(tz=pytz.utc)\n timestamp_utc_str = datetime_2_str(timestamp_dt, output_format=time_format_full_with_timezone)\n\n output.append({\n \"label\": trend['name'],\n \"volume\": tw_volume,\n \"time\": timestamp_utc_str,\n \"query\": trend['query'],\n \"url\": trend['url'],\n })\n\n images_output.append({\n \"label\": trend['name'],\n \"time\": timestamp_utc_str,\n \"tweets\": analyze_trending_keyword(trend['name'], count=50)\n })\n\n output_json = json.dumps(output, ensure_ascii=False)\n images_output_json = json.dumps(images_output, ensure_ascii=False)\n return output_json, images_output_json", "def __top_countries_and_companies(self, df):\n\t\tself.top_countries = set(self.__top_prod_countries(df, self.number_of_countries))\n\t\tself.top_companies = set(self.__top_prod_companies(df, self.number_of_companies))\n\t\tdf['is_top_prod'] = df['production_companies'].apply(\n\t\t\tlambda x: 1 if len(set(x).intersection(self.top_companies)) >= 1 else 0)\n\t\tfor country in self.top_countries:\n\t\t\tdf[country] = df['production_countries'].apply(lambda x: 1 if country in x else 0)\n\t\treturn df" ]
[ "0.7188561", "0.6930038", "0.6566591", "0.650713", "0.64550346", "0.6345606", "0.6311068", "0.6263752", "0.62176055", "0.6169439", "0.6162773", "0.6110329", "0.6062588", "0.6030514", "0.5980516", "0.59804785", "0.59776163", "0.59610236", "0.59238714", "0.59127903", "0.5910179", "0.5899181", "0.58913016", "0.5877797", "0.5866588", "0.5749677", "0.5738419", "0.5735062", "0.57271874", "0.5722993" ]
0.7455793
0
Sets the cleanup_metadata of this PersonDeleteOptions.
def cleanup_metadata(self, cleanup_metadata): self._cleanup_metadata = cleanup_metadata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setCleanupTool(self,value):\n self.PDFreactorConfiguration.in1[\"cleanupTool\"] = value", "def clean_up_daemon_sets_in_namespaces_with_cleanup_policy(self, namespaces, cleanup_policy):\n return self.delete_resource_with_cleanup_policy(namespaces, cleanup_policy,\n self.apps_api.delete_collection_namespaced_daemon_set, \"DS\")", "def delete_server_metadata(self, name):\n raise NotImplementedError", "def _delete_cache_metadata(self, force_delete_file):\n if force_delete_file:\n self._delete_dirs_datasets_in_cache_dir_except_downloads()\n else:\n msg = 'All metadata files of all datasets will be lost if you proceed! ' + \\\n 'Set both \\'force_delete_file=True\\' and \\'force_delete_metadata=True\\' ' + \\\n 'to proceed with the deletion of dbcollection.json and all metadata files.'\n warnings.warn(msg, UserWarning, stacklevel=2)", "def set_metadata(self, metadata):\n self.metadata = metadata\n return self", "def auto_delete_after(self, auto_delete_after):\n\n self._auto_delete_after = auto_delete_after", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def delete_metadata(self, scope, name, key, *, session: \"Session\"):\n if not json_implemented(session=session):\n raise NotImplementedError\n\n try:\n row = session.query(models.DidMeta).filter_by(scope=scope, name=name).one()\n existing_meta = getattr(row, 'meta')\n # Oracle returns a string instead of a dict\n if session.bind.dialect.name in ['oracle', 'sqlite'] and existing_meta is not None:\n existing_meta = json_lib.loads(existing_meta)\n\n if key not in existing_meta:\n raise exception.KeyNotFound(key)\n\n existing_meta.pop(key, None)\n\n row.meta = None\n session.flush()\n\n # Oracle insert takes a string as input\n if session.bind.dialect.name in ['oracle', 'sqlite']:\n existing_meta = json_lib.dumps(existing_meta)\n\n row.meta = existing_meta\n except NoResultFound:\n raise exception.DataIdentifierNotFound(f\"Key not found for data identifier '{scope}:{name}'\")", "def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. 
{}'.format(ex.strerror)\n raise MetaFileError(msg)", "def tearDown(self):\n if self.flavor_creator:\n self.flavor_creator.clean()\n\n super(self.__class__, self).__clean__()", "def set_metadata(self, metadata):\n if self.num_features != metadata.num_features:\n raise ValueError(\"Invalid metadata for feature list\")\n self.metadata = metadata", "def cleanup_job_period(self, cleanup_job_period: ConfigNodePropertyInteger):\n\n self._cleanup_job_period = cleanup_job_period", "def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n self._impl.delete_metadata(volume, keys=keys, deletes=10,\n delete_size=3)", "def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)", "def cleanup_time(self, cleanup_time):\n\n self._cleanup_time = cleanup_time", "def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))", "def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)", "def clean_object(metadata, analysistype):\n for sample in metadata:\n try:\n delattr(sample[analysistype], \"targetnames\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"targets\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"dnaseq\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"protseq\")\n except AttributeError:\n pass", "def metadata_delete(self, endpoint_name=None, key=None):\n if key is None:\n raise Exception(\"Key required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metadata/%s' % key, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/metadata/%s' % (endpoint_name, key), 'DELETE')", "def handleCleanMetadataRecon(self):\n logging.debug(\"Removing compromising personal info and remaking the file...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n fileType = self.filesList.getFileObj(filePath).type\n self.printPdfPersonalData(filePath, \n fileType,\n AddedFile.changeBase(filePath, self.outputPath))\n self.tabArea.setCurrentIndex(1)\n self.changeCursor()\n self.filesList.getFileObj(filePath).reconMetaCleaned = True", "def set_metadata(self, metadata):\n return self.manager.set_metadata(self, metadata)", "def set_metadata(self, metadata):\n return self.manager.set_metadata(self, metadata)", "def cleanup(self, context, instance, network_info, block_device_info=None,\n destroy_disks=True):\n pass", "def setDestroy(self):\n self.destroy = True", "def token_cleanup_threshold(self, token_cleanup_threshold):\n\n self._token_cleanup_threshold = token_cleanup_threshold", "def setup_args_delete(parser):\n parser.add_argument(\"--tag\", required=False)\n parser.add_argument(\"--save\", required=False,\n dest=\"save\", action=\"store_true\")\n parser.add_argument(\"--list\", required=False,\n dest=\"delete_list\", action=\"store_true\")\n return parser", "def storage_metadata(self, storage_metadata):\n self._storage_metadata = storage_metadata", "def delete_metadata(self, keys):\n return 
self.manager.delete_metadata(self, keys)", "def vault_delete(self, vault_delete):\n self._vault_delete = vault_delete", "def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()" ]
[ "0.49342144", "0.48299047", "0.48164648", "0.4738852", "0.46431234", "0.46101066", "0.4605008", "0.4540497", "0.45076135", "0.44877455", "0.4485631", "0.44735658", "0.44406572", "0.4437597", "0.44371903", "0.44357738", "0.4411002", "0.43998602", "0.43897504", "0.43820536", "0.4376613", "0.4376613", "0.43625534", "0.43609884", "0.43470874", "0.43021798", "0.42983657", "0.42750475", "0.42734513", "0.42572662" ]
0.70799506
0
Sets the home_folder of this PersonDeleteOptions.
def home_folder(self, home_folder): self._home_folder = home_folder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def homeDirectory(self, ignored_value):\n\t\tself.__homeDirectory = self._resolve_home_directory()", "def home(self, home):\n if home is None:\n raise ValueError(\"Invalid value for `home`, must not be `None`\") # noqa: E501\n\n self._home = home", "def GetHomeFolder(self): # real signature unknown; restored from __doc__\n pass", "def clear_data_home(data_home: str = None):\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)", "def user_home(self, user_home_path: str):\n c = self.clone()\n c._user_home_path = path.normpath(user_home_path)\n return c", "def set_user_home(self, path):\n os.environ['HOME'] = path", "def set_user_home(self, path):\n os.environ['HOME'] = path", "def clear_data_home(data_home=None):\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)", "def homedir(options=['/home/jennifer/', '/home/jwalker/',\n 'C:/Users/jenfl/']):\n\n home = None\n for h in options:\n if os.path.isdir(h):\n home = h\n if home is None:\n raise ValueError('Home directory not found in list of options.')\n return home", "def homeDirectory(self):\n\t\treturn self.__homeDirectory", "def set_share_user_home_dir(self, bShareUserHomeDir):\n\t\tcall_sdk_function('PrlVmCfg_SetShareUserHomeDir', self.handle, bShareUserHomeDir)", "def home_team(self, home_team):\n\n self._home_team = home_team", "def overridden_users_home_directories(self, overridden_users_home_directories):\n\n self._overridden_users_home_directories = overridden_users_home_directories", "def __validate_home_dir(self, home, login, system, force):\n\n\t\tif system:\n\t\t\tif home:\n\t\t\t\tif os.path.exists(home) and not force:\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified directory '\n\t\t\t\t\t\t'{0} for system user {1} already exists. If you '\n\t\t\t\t\t\t'really want to use it, please use the --force '\n\t\t\t\t\t\t'argument.').format(stylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME,login)))\n\n\t\t\t\tif not home.startswith(\n\t\t\t\t\tsettings.defaults.home_base_path) \\\n\t\t\t\t\tand not home.startswith('/var') \\\n\t\t\t\t\tor home.startswith(LMC.configuration.groups.base_path) \\\n\t\t\t\t\tor home.find('/tmp') != -1:\n\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified home '\n\t\t\t\t\t\t'directory {0} for system user {1} is outside {2} '\n\t\t\t\t\t\t'and /var, or inside {3} or a temporary '\n\t\t\t\t\t\t'directory (/var/tmp, /tmp). This is unsupported, '\n\t\t\t\t\t\t'Aborting.').format(\n\t\t\t\t\t\tstylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME,login),\n\t\t\t\t\t\tsettings.defaults.home_base_path,\n\t\t\t\t\t\tLMC.configuration.groups.base_path))\n\n\t\t\t\tif home in (user.homeDirectory for user in self):\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified home '\n\t\t\t\t\t\t'directory {0} for system user {1} is already owned '\n\t\t\t\t\t\t'by another user. Please choose another one.').format(\n\t\t\t\t\t\tstylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME, login)))\n\n\t\t\t\treturn home\n\t\telse: # not system\n\t\t\tif home:\n\t\t\t\tlogging.warning(_(u'Specifying an alternative home directory '\n\t\t\t\t\t'is not allowed for standard users. 
Using standard home '\n\t\t\t\t\t'path {0} instead.').format(\n\t\t\t\t\t\tstylize(ST_PATH, '%s/%s' % (\n\t\t\t\t\t\t\tLMC.configuration.users.base_path, login))))\n\n\t\treturn \"%s/%s\" % (LMC.configuration.users.base_path, login)", "def user_home_path(self):\n return path.join(env.user_home, self._user_home_path)", "def get_home_path(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHomePath', self.handle)", "def get_data_home(data_home: str = None):\n if not data_home:\n data_home = os.environ.get(\n 'DAVID_DATA', os.path.join('~', 'david_data'))\n data_home = os.path.expanduser(data_home)\n if not os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home", "def entry_set_folder(self, entry):\r\n global folder_name\r\n folder_name = filedialog.askdirectory()\r\n entry.delete(0, 'end')\r\n entry.insert(tk.END, folder_name)", "def select_hares_folder(self):\n # Get path\n path = QtWidgets.QFileDialog().getExistingDirectory(self, 'HARES uitvoerbestanden folder')\n if not path:\n return None\n\n self.input_elements['hares folder'].set_value(path)", "def path_homeassistant(self) -> Path:\n return self.path_supervisor / HOMEASSISTANT_CONFIG", "def redefine_app_config_home(self, config_home):\n dst = _app_config_file()\n new_config = (\n pyhocon.ConfigFactory.parse_string(\n \"aiscalator.app_config_home_directory = \" + config_home\n )\n ).with_fallback(_app_config_file(), resolve=False)\n with open(dst, \"w\") as output:\n output.write(\n pyhocon.converter.HOCONConverter.to_hocon(new_config)\n )\n self._app_conf = new_config\n return new_config", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def set_pkg_home(self, doc, location):\n self.assert_package_exists()\n if not self.package_home_set:\n self.package_home_set = True\n if validations.validate_pkg_homepage(location):\n doc.package.homepage = location\n return True\n else:\n raise SPDXValueError('Package::HomePage')\n else:\n raise CardinalityError('Package::HomePage')", "def validate_home(user, data, account):\n if not 'username' in data and not user.pk:\n # other validation will have been raised for required username\n return\n user = type(user)(\n username=data.get('username') or user.username,\n shell=data.get('shell') or user.shell,\n )\n if 'home' in data and data['home']:\n home = os.path.normpath(data['home'])\n user_home = user.get_base_home()\n account_home = account.main_systemuser.get_home()\n if user.has_shell:\n if home != user_home:\n raise ValidationError({\n 'home': _(\"Not a valid home directory.\")\n })\n elif home not in (user_home, account_home):\n raise ValidationError({\n 'home': _(\"Not a valid home directory.\")\n })", "def test_config_home_custom_home_dir():\n cache_folder = os.path.join(temp_folder(), \"custom\")\n with environment_append({\"CONAN_USER_HOME\": cache_folder}):\n client = TestClient(cache_folder=cache_folder)\n client.run(\"config home\")\n assert cache_folder in client.out\n client.run(\"config home --json home.json\")\n _assert_dict_subset({\"home\": cache_folder}, json.loads(client.load(\"home.json\")))", "def spark_home(self, sparkHome):\n self.sparkProperties[SparkProperties.SPARK_MESOS_EXECUTOR_HOME] = sparkHome\n return self", "def get_home_dir(self) -> str:\n ret = os.path.expanduser(\"~\")\n if not os.path.exists(ret):\n raise RuntimeError(\"The home directory does not exist.\")\n return ret", "def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)", "def set_folder(self, folder):\n 
self.folder = folder\n self.templates.directories[0] = folder\n self.app.root_path = folder", "def reset_backup_folder(self):\n pass" ]
[ "0.6487305", "0.63046443", "0.6077848", "0.58213824", "0.58197993", "0.58150965", "0.58150965", "0.5753256", "0.57263446", "0.567206", "0.5638868", "0.54021066", "0.53872454", "0.5385635", "0.53816557", "0.53468376", "0.53266287", "0.5325523", "0.5319053", "0.52196246", "0.51817894", "0.51177895", "0.5103245", "0.5087924", "0.50796807", "0.50413394", "0.4999796", "0.49996877", "0.49805436", "0.4955576" ]
0.75828874
0
Sets the shared_folders of this PersonDeleteOptions.
def shared_folders(self, shared_folders): self._shared_folders = shared_folders
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_user_defined_shared_folders_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetUserDefinedSharedFoldersEnabled', self.handle, bEnabled)", "def set_map_shared_folders_on_letters(self, bMapSharedFoldersOnLetters):\n\t\tcall_sdk_function('PrlVmCfg_SetMapSharedFoldersOnLetters', self.handle, bMapSharedFoldersOnLetters)", "def set_folders(self, folders):\n\n self.folders = folders", "def is_user_defined_shared_folders_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsUserDefinedSharedFoldersEnabled', self.handle))", "def set_share_all_host_disks(self, bShareAllHostDisks):\n\t\tcall_sdk_function('PrlVmCfg_SetShareAllHostDisks', self.handle, bShareAllHostDisks)", "def delete_s2_shared_targets(self,\n shared_targets,\n **ignore):\n action = const.ACTION_DELETE_S2_SHARED_TARGETS\n valid_keys = [\n 'shared_targets',\n ]\n body = filter_out_none(locals(), valid_keys)\n if not self.conn.req_checker.check_params(\n body,\n list_params=['shared_targets'],\n ):\n return None\n\n return self.conn.send_request(action, body)", "def shared_scope(self, shared_scope):\n\n self._shared_scope = shared_scope", "def shared_scope(self, shared_scope):\n\n self._shared_scope = shared_scope", "def set_share_user_home_dir(self, bShareUserHomeDir):\n\t\tcall_sdk_function('PrlVmCfg_SetShareUserHomeDir', self.handle, bShareUserHomeDir)", "def upload_shared():\n # MARK: default copy to home dir\n put(conf.INS_ARGS['shared_folder'], '~/')", "def rm_common(self, dir_delete, dir_compare):\n ignore = self.settings.get('commonIgnore')\n if isinstance(ignore, str):\n ignore = [ignore]\n dcmp = dircmp(dir_delete, dir_compare, ignore)\n for file_name in dcmp.common_files:\n os.remove(dir_delete + '/' + file_name)\n for directory in dcmp.common_dirs:\n shutil.rmtree(dir_delete + '/' + directory)", "def set_shared_lib_options(self, task):\n pass", "def common_options(self, common_options):\n self._common_options = common_options", "def set_guest_sharing_enabled(self, bVmGuestSharingEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetGuestSharingEnabled', self.handle, bVmGuestSharingEnabled)", "def is_map_shared_folders_on_letters(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsMapSharedFoldersOnLetters', self.handle))", "def set_shared_details(self, shared_details):\n\n\t\tif shared_details is not None and not isinstance(shared_details, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: shared_details EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__shared_details = shared_details\n\t\tself.__key_modified['shared_details'] = 1", "def expungeSharedNotebooks(self, authenticationToken, sharedNotebookIds):\r\n pass", "def trigger_item_shared(self, checked):\n if self.selected_item and checked != self.selected_item.shared:\n if self.selected_item.shared:\n self.selected_item.unshare()\n else:\n self.selected_item.share()\n self.controller.item_shared(self.selected_item)", "def init_sharing_path(self):\n if not os.path.isdir(self.cfg['sharing_path']):\n try:\n os.makedirs(self.cfg['sharing_path'])\n except OSError:\n self.stop(1, '\\nImpossible to create \"{0}\" directory! 
Check sharing_path value contained in the following file:\\n\"{1}\"\\n'\n .format(self.cfg['sharing_path'], Daemon.CONFIG_FILEPATH))", "def folder_ids(self, folder_ids):\n\n self._folder_ids = folder_ids", "def set_path(self, sNewSharePath):\n\t\tcall_sdk_function('PrlShare_SetPath', self.handle, sNewSharePath)", "def test_shared_albums(self):\n with ts.SetupDbAndCredentials() as s:\n args = [\"--skip-files\"]\n s.test_setup(\n \"test_shared_albums\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n t = (\n TestAccount.album_image_count\n + TestAccount.album_shared_image_count\n + TestAccount.shared_album_image_count\n + TestAccount.shared_album_shared_image_count\n )\n\n with LocalData(s.root) as db:\n db.cur.execute(\"SELECT COUNT() FROM AlbumFiles\")\n count = db.cur.fetchone()\n self.assertEqual(\n t,\n count[0],\n \"expected {} files in all albums including shared\".format(t),\n )\n\n with ts.SetupDbAndCredentials() as s:\n args = [\"--skip-files\", \"--skip-shared-albums\"]\n s.test_setup(\n \"test_shared_albums\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n # note that unless we use --no-album-index the shared files in the\n # visible album will show up here\n t = (\n TestAccount.album_image_count + TestAccount.album_shared_image_count\n ) # see above\n with LocalData(s.root) as db:\n db.cur.execute(\"SELECT COUNT() FROM AlbumFiles\")\n count = db.cur.fetchone()\n self.assertEqual(\n t,\n count[0],\n \"expected {} files in all albums excluding shared\".format(t),\n )", "def set_host_sharing_enabled(self, bVmHostSharingEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetHostSharingEnabled', self.handle, bVmHostSharingEnabled)", "def test_sharedstatedir(self):\n self.chck_triple('sharedstatedir')", "def set_share(self, total_people):\n self.paid = self._get_paid()\n self.share = round(self.paid/Decimal(total_people), 2)", "def _unshare_dir(target):\n logging.debug(\"Un-sharing directory %s\" % target)\n os.rmdir(target)", "def delete_share(self, context, share, share_server=None):\n local_share_path = self._get_local_share_path(share)\n cmd = ['rm', '-rf', local_share_path]\n try:\n self._execute(*cmd, run_as_root=True)\n except exception.ProcessExecutionError:\n LOG.error(_LE('Unable to delete share %s'), share['name'])\n raise", "def _share_folder(self, nms, volume, folder):\n path = '%s/%s' % (volume, folder.lstrip('/'))\n share_opts = {\n 'read_write': '*',\n 'read_only': '',\n 'root': 'nobody',\n 'extra_options': 'anon=0',\n 'recursive': 'true',\n 'anonymous_rw': 'true',\n }\n LOG.debug('Sharing folder %s on Nexenta Store', folder)\n nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,\n share_opts)", "async def delete_folders_of_project(\n folder_id: str, user_id: UserID, node_id: NodeID | None = None\n):", "def delete_share(self, context, share, share_server=None):\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n if not volume_uuid:\n LOG.warning(\"No volume found for \"\n \"share %(project_id)s/%(name)s\",\n {\"project_id\": share['project_id'],\n \"name\": share['name']})\n return\n\n if self.configuration.quobyte_delete_shares:\n self.rpc.call('deleteVolume', {'volume_uuid': volume_uuid})\n else:\n self.rpc.call('exportVolume', {\"volume_uuid\": volume_uuid,\n \"remove_export\": True,\n })" ]
[ "0.6073998", "0.56869525", "0.51037014", "0.50935656", "0.486683", "0.47957003", "0.45884755", "0.45884755", "0.45875266", "0.45848623", "0.45294005", "0.44923756", "0.4445932", "0.44363564", "0.44327128", "0.44121525", "0.44094914", "0.43840384", "0.43696904", "0.43313774", "0.43305263", "0.4327368", "0.43203443", "0.43111166", "0.43089688", "0.42842153", "0.42826372", "0.42758757", "0.4261688", "0.42588308" ]
0.75721765
0
Sets the collections of this PersonDeleteOptions.
def collections(self, collections): self._collections = collections
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_collections(self, collections):\n for collection in collections:\n self.drop_collection(collection)", "def set_collection(self, service_name, collection_name, to_cache):\n self.services.setdefault(service_name, {})\n self.services[service_name].setdefault('collections', {})\n self.services[service_name]['collections'].setdefault(collection_name, {})\n options = self.services[service_name]['collections'][collection_name]\n classpath = self.build_classpath(to_cache.__bases__[0])\n\n if classpath == 'kotocore.collections.Collection':\n classpath = 'default'\n\n options[classpath] = to_cache", "def test_delete_collections(self):\n pass", "def setOptions(self, options):\n assert isinstance(options, list);", "def setCollection(self, collection):\n self.collectionName = collection[\"name\"]\n self.collectionType = collection[\"type\"]\n return", "def test_delete_collection_o_auth_client(self):\n pass", "def remove_collection_names(account, dataset, tags):\n\n token = get_access_token()\n\n if dataset.startswith(\"N:dataset:\"):\n selected_dataset_id = dataset\n else:\n selected_dataset_id = get_dataset_id(token, dataset)\n\n if not has_edit_permissions(token, selected_dataset_id):\n abort(403, \"You do not have permission to edit this dataset.\")\n\n for tagid in tags:\n r = requests.delete(f\"{PENNSIEVE_URL}/datasets/{str(selected_dataset_id)}/collections/{str(tagid)}\", headers=create_request_headers(token))\n r.raise_for_status()\n\n return dict({\"collection\": \"Collection removed\"})", "def deletecollection_namespaced_o_auth_client(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_o_auth_client\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/oauthclients'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_delete_collection_o_auth_client_authorization(self):\n pass", "def deletecollection_namespaced_endpoints(self, namespace, **kwargs):\n 
kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.deletecollection_namespaced_endpoints_with_http_info(namespace, **kwargs)\n else:\n (data) = self.deletecollection_namespaced_endpoints_with_http_info(namespace, **kwargs)\n return data", "def set_collection(self, collection_attr):\n if self.collection_menu:\n self.collection_menu.destroy()\n\n self.collection_menu = _CollectionEditorMenu(collection=collection_attr)\n self.collection_menu.pack(fill=BOTH, expand=TRUE, padx=10, pady=10)", "def test_delete_collection_namespaced_build_config(self):\n pass", "def documents(self, documents):\n\n self._documents = documents", "def test_delete_collection(self):\n pass", "def datasets(self, datasets):\n\n self._datasets = datasets", "def deletecollection_namespaced_o_auth_client_authorization(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_o_auth_client_authorization\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/oauthclientauthorizations'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "async def delete_documents(self, collection, ids):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n # Delete operation doesn't work with API v2 yet.\n # So using old API (/solr/...).\n await self.post(\n '/solr/{}/update'.format(collection),\n params=params, json_data={\"delete\": ids}\n )\n logger.info('Successfully deleted {} documents from collection {}'\n .format(len(ids), collection))\n except SolrError:\n logger.warning('Failed to delete {} documents from collection {}'\n .format(len(ids), collection))\n raise", "def options(self, options):\n\n self._options = options", "def options(self, options):\n\n self._options = options", "def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', 
default)", "def _reset_collection(self):\r\n\r\n self._meta.queryset._document._collection = None\r\n self._meta.queryset._collection_obj = self._meta.queryset._document._get_collection()\r\n if hasattr(self._meta.queryset, '_reset_already_indexed'):\r\n self._meta.queryset._reset_already_indexed()", "def set_options(self, options):\n self.options = options", "def _reset_collection(base: pymongo.database.Database, collection: str) -> None:\n logger.info(f'Resetting all data related to \"{collection}\" collection...')\n nb_removed = base[collection].delete_many({}).deleted_count\n logger.info(f\"{nb_removed} records deleted.\")\n\n logger.info(f'Resetting counters.\"{collection}\".')\n nb_removed = base[\"counters\"].delete_many({\"_id\": collection}).deleted_count\n logger.info(f\"{nb_removed} counter records deleted\")", "def test_delete_collection_namespaced_deployment_config(self):\n pass", "def del_collection(self, service_name, collection_name, base_class=None):\n # Unlike ``get_collection``, this should be fire & forget.\n # We don't really care, as long as it's not in the cache any longer.\n try:\n classpath = self.build_classpath(base_class)\n opts = self.services[service_name]['collections'][collection_name]\n del opts[classpath]\n except KeyError:\n pass", "def _addCollections(self):\n\n self._colls = dict.fromkeys(self._listOfTargets)\n self._collsNew = dict.fromkeys(self._listOfTargets)\n self._collsMiss = dict.fromkeys(self._listOfTargets)\n self._collsNoLink = dict.fromkeys(self._listOfTargets)\n\n for target in self._listOfTargets:\n self._colls[target] = self._dbUtil.getCollection('XRD_' + self._baseColl[target])\n self._collsNew[target] = self._dbUtil.getCollection('XRD_' + self._baseColl[target]+'_new')\n self._collsMiss[target] = self._dbUtil.getCollection('XRD_' + self._baseColl[target]+'_missing')\n self._collsNoLink[target] = self._dbUtil.getCollection('XRD_' + self._baseColl[target]+'_brokenLink')\n\n self._collDataServer = self._dbUtil.getCollection(\"XRD_DataServers\")", "async def clear(self, remotes: List[PodData] = None, _all: bool = False,\n clear_env_config: bool = False) -> None:\n if not remotes:\n remotes = []\n if not remotes and not _all:\n return\n\n central_conn = None\n try:\n central_conn = await AsyncMongoConnector.create(\n host=self.central_host, port=self.central_port, pwd=self.central_pwd, logger=self.log,\n connect=True\n )\n for col in self.collection_names:\n self.log.info(\"Selectively clearing collection {} from central...\".format(col))\n await central_conn.clear_collection(col,\n envs=[r.full_name for r in remotes],\n _all=_all)\n if clear_env_config:\n self.log.info(\"Selectively clearing collection {} from central...\"\n .format(AsyncMongoConnector.environments_collection))\n await central_conn.clear_collection(AsyncMongoConnector.environments_collection,\n envs=[r.full_name for r in remotes],\n _all=False)\n finally:\n if central_conn:\n central_conn.disconnect()", "def delete_replace_documents(self, database, collection, spec={},\n documents=[]):\n r = self.__get_response(settings.DEL_REP_DOCS,\n {\"db\": database, \"col\": collection}, data=documents, q=spec)\n if r[\"status\"] == 200:\n return r[\"result\"][\"n\"]\n raise Exception(r[\"result\"][\"message\"])", "def set_options(self, options):\n self.options = options", "def delete_collections_with_http_info(self, bucket_id, **kwargs):\n\n all_params = ['bucket_id', 'if_match', 'if_none_match', 'since', 'before', 'sort']\n all_params.append('callback')\n 
all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_collections\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `delete_collections`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `delete_collections`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `delete_collections`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/collections'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n\n query_params = {}\n if 'since' in params:\n query_params['_since'] = params['since']\n if 'before' in params:\n query_params['_before'] = params['before']\n if 'sort' in params:\n query_params['_sort'] = params['sort']\n collection_formats['_sort'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='List',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)" ]
[ "0.5033303", "0.49573594", "0.48853493", "0.4865477", "0.47951487", "0.46853006", "0.46215004", "0.45475668", "0.45191467", "0.45182347", "0.45089346", "0.4502301", "0.44906533", "0.44569468", "0.44522327", "0.4444393", "0.4437373", "0.44267026", "0.44267026", "0.44095856", "0.4402166", "0.43905175", "0.4361741", "0.4323045", "0.43208858", "0.43184224", "0.43053153", "0.4294544", "0.42903695", "0.42901704" ]
0.62720793
1
Sets the ratings of this PersonDeleteOptions.
def ratings(self, ratings): self._ratings = ratings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_rating(self, **kwargs):\n\n data = dict()\n data['value'] = kwargs.get('value') or None\n\n path = self._get_movie_id_path('rating_delete')\n resp = self._delete_method(path, kwargs, data)\n\n return resp", "def rating_id(self, rating_id: int):\n\n self._rating_id = rating_id", "def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass", "def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)", "def delete_ratings_table(self):\r\n\r\n sql_command = \"\"\"\r\n DELETE FROM UserRecommendations;\r\n \"\"\"\r\n self.controller.execute(sql_command)\r\n self.connection.commit()", "def setDeletesThreshold(self, v):\n return self._set(deletesThreshold=v)", "def dist_by_rating(self):\n return ratings_distribution", "def rating(self, rating):\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating > 5): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value less than or equal to `5`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating < 1): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value greater than or equal to `1`\") # noqa: E501\n\n self._rating = rating", "def reviewers(self, reviewers):\n\n self._reviewers = reviewers", "def set_radius_distribution(self, distributions_props, mode='pore', labels=None):\n\n from .Stats import distribution\n radii = []\n\n if mode not in [\"pore\", \"throat\"]:\n mode = \"pore\"\n\n if mode == \"pore\":\n n = self.graph.number_of_nodes()\n if labels is not None:\n indices = labels\n else:\n indices = self.graph.nodes\n\n elif mode == \"throat\":\n n = self.graph.number_of_edges()\n\n # On génère 2 x plus de rayons que de pores, puis on mélange et on prend les n premiers\n for properties, frac in distributions_props:\n dist = properties.get(\"func\", distribution)\n r = list(dist(n_samples=int(np.ceil(frac*n*2)), **properties))\n radii.extend(r)\n\n np.random.shuffle(radii)\n radii = radii[0:n]\n\n if mode == \"pore\":\n nx.set_node_attributes(self.graph, dict(\n zip(indices, radii)), 'radius')\n for n1, n2 in self.graph.edges:\n try:\n self.graph[n1][n2]['radius'] = min(\n self.graph.nodes[n1]['radius'], self.graph.nodes[n2]['radius'])\n except:\n warn(\n \"Cannot assign radius value to throat between nodes\", n1, \" and \", n2)\n\n elif mode == \"throat\":\n nx.set_edge_attributes(self.graph, dict(\n zip(self.graph.edges, radii)), 'radius')\n for node in self.graph.nodes:\n try:\n if self.graph.degree(node) > 0:\n self.graph.nodes[node]['radius'] = np.array(\n [self.graph[node][neighbor]['radius'] for neighbor in self.graph[node]]).max()\n except:\n warn(\"Cannot assign radius value to node\", node)", "def rating(self, **kwargs):\n\n data = dict()\n data['value'] = kwargs.get('value') or None\n\n path = self._get_movie_id_path('rating')\n resp = self._post_method(path, kwargs, data)\n\n return resp", "def delete(self, request, pk=None, **kwargs):\n rate_delete = self.get_object()\n rate_delete.delete()\n return Response({\"message\": \"Your rate was successfully deleted\"},\n status.HTTP_200_OK)", "def _rate_exploration(self, exp_id, num_ratings, rating):\n # Each user id needs to be unique since each user can only give an\n # exploration one rating.\n user_ids = ['user%d' % i for i in range(num_ratings)]\n for user_id in 
user_ids:\n rating_services.assign_rating_to_exploration(\n user_id, exp_id, rating)", "def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)", "def rating_description(self, rating_description):\n\n self._rating_description = rating_description", "def set_params(self, rate, radius):\n self.radius = radius\n self.learning_rate = rate\n self.neighborhood.radius = radius", "def setRatingFilter(self, min = 0, max = 100):\n self._updateMovieList = self._updateMovieList or self._filterRatingMin != min or self._filterRatingMax != max\n self._filterRatingMin, self._filterRatingMax = min, max", "def setRatingFilter(self, min = 0, max = 100):\n self._updateMovieList = self._updateMovieList or self._filterRatingMin != min or self._filterRatingMax != max\n self._filterRatingMin, self._filterRatingMax = min, max", "def setRatingFilter(self, min = 0, max = 100):\n self._updateMovieList = self._updateMovieList or self._filterRatingMin != min or self._filterRatingMax != max\n self._filterRatingMin, self._filterRatingMax = min, max", "def set_density_params(self, theta, scale=None):\n self.density.loc = self.mean(theta)\n if scale is not None:\n self.density.scale = scale", "def player_a_rating(self, player_a_rating):\n\n self._player_a_rating = player_a_rating", "def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()", "def _on_articles_rating(self, evt=None, rating=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # get rating from event\n if evt is not None:\n evt_id = evt.GetId()\n \n if evt_id == ID_ARTICLES_RATING_0:\n rating = 0\n elif evt_id == ID_ARTICLES_RATING_1:\n rating = 1\n elif evt_id == ID_ARTICLES_RATING_2:\n rating = 2\n elif evt_id == ID_ARTICLES_RATING_3:\n rating = 3\n elif evt_id == ID_ARTICLES_RATING_4:\n rating = 4\n elif evt_id == ID_ARTICLES_RATING_5:\n rating = 5\n else:\n return\n \n # check rating\n if rating is None:\n return\n \n # set rating and update library\n for article in articles:\n article.rating = rating\n self._library.update(article)\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()\n \n # re-select articles\n self._articles_view.SetSelectedArticles(articles)", "def recommended_values(self, recommended_values):\n\n self._recommended_values = recommended_values", "def update_delete_options(\n self,\n ) -> Union[\n bulk_persistence.BulkUDCompileState.default_update_options,\n Type[bulk_persistence.BulkUDCompileState.default_update_options],\n ]:\n\n if not self._is_crud:\n raise sa_exc.InvalidRequestError(\n \"This ORM execution is not against an UPDATE or DELETE \"\n \"statement so there are no update options.\"\n )\n return self.execution_options.get(\n \"_sa_orm_update_options\",\n bulk_persistence.BulkUDCompileState.default_update_options,\n )", "def _apply_driver_options(self, driver_options):\n\n for optname, optval in driver_options.items():\n if optname in self._driver_options:\n self._driver_options[optname] = optval", "def getAllJudgeRatings(self):\n\n judgesExcelLogger.info(\"getAllJudgeRatings: Attempting to get ratings from all judges \"\n \"for set '%s'\", self.setName)\n try:\n for judgeName in self.judgeNames:\n self.judgeToRating[judgeName] = self.getRatingsFromJudge(judgeName)\n except:\n 
judgesExcelLogger.warning(\"getAllJudgeRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def d_rate(self, d_rate):\n\n self._d_rate = d_rate", "def rating_date(self, rating_date):\n\n self._rating_date = rating_date", "def reset(self):\n self.items = np.arange(self.ratings.shape[1])" ]
[ "0.5852431", "0.47767842", "0.457218", "0.4555128", "0.4551001", "0.4544296", "0.4539956", "0.4499743", "0.43452004", "0.4328597", "0.43138883", "0.42836276", "0.42558882", "0.42526865", "0.42484444", "0.42158845", "0.41451105", "0.41451105", "0.41451105", "0.41185495", "0.4086224", "0.408116", "0.40752393", "0.40517357", "0.4025717", "0.40049386", "0.3981869", "0.39811298", "0.39775053", "0.39725485" ]
0.6189722
1
Sets the comments of this PersonDeleteOptions.
def comments(self, comments): self._comments = comments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comments(self, comments):\n if comments is not None and len(comments) > 1000:\n raise ValueError(\"Invalid value for `comments`, length must be less than or equal to `1000`\") # noqa: E501\n\n self._comments = comments", "def comments(self, comments):\n\n self.container['comments'] = comments", "def change_comments(self, new_comments):\n\n self.comments = new_comments", "def comment(self, comment):\n\n self.logger.debug(\"In 'comment' setter.\")\n\n self._comment = comment", "def set_comment(self, comment):\n\t\tself.comment_ = comment", "def set_comment(self, comment):\n self.comment_text = str(comment)", "def comment_option(args, run):\n run.meta_info[\"comment\"] = args", "def comment(self, comment):\n self.logger.debug(\"In 'comment' setter.\")\n\n if len(comment) > 512:\n raise Exception(\"Comment is too long, must be less than 512 characters.\")\n\n self._comment = comment", "def delete_comments(redditor):\n\n for index, comment in enumerate(redditor.comments.new(limit=None)):\n print(\"Deleting comment {}\".format(index))\n comment.edit(\"-\")\n comment.delete()", "def comment(self, *comments):\n for comment in comments:\n self._p('[*]', comment)", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def can_delete_comments(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_delete_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True", "def comment(self, value: str):\n self._comment = value", "def delete_comment(self, uid: str):\n pass", "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n doc.comment = comment\n else:\n raise CardinalityError('Document::Comment')", "def comment(self, comment): # type: (str) -> None\n self._tmp_comment = comment", "def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e", "def set_comments(self, id, comments):\n logging.debug(f\"\"\"__set_comments {comments} for id {id}\"\"\")\n sql = f\"\"\"update {self.schemaRepo}.tablediff\n set comments = '{comments}' where id = {id}\"\"\"\n conn = self.connect(self.cxRepo)\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")", "def cmd_comment_delete(client, args):\n delete_comment = client.delete_comment(args.comment_id)\n generate_output({'delete_comment': delete_comment})" ]
[ "0.6005803", "0.5943034", "0.533463", "0.52282166", "0.51729524", "0.5094263", "0.4986265", "0.49235", "0.49029818", "0.48535043", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48440555", "0.48126605", "0.48126605", "0.4735328", "0.47268143", "0.47248125", "0.46517888", "0.4649123", "0.45893306", "0.45836705", "0.45515615" ]
0.6326292
0
Sets the collection_feedback of this PersonDeleteOptions.
def collection_feedback(self, collection_feedback): self._collection_feedback = collection_feedback
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_as_not_feedback(self):\n self.feedback = False", "def select_delete_collection(self, expec_fail=False):\n delete_collection_sitem = self.locator_finder_by_xpath(self.delete_collection_id, expec_fail=expec_fail)\n delete_collection_sitem.click()\n time.sleep(1)\n delete_collection_confirm_sitem = self.locator_finder_by_xpath(self.delete_collection_confirm_id)\n delete_collection_confirm_sitem.click()", "def setCollection(self, collection):\n self.collectionName = collection[\"name\"]\n self.collectionType = collection[\"type\"]\n return", "def set_collection(self, collection_attr):\n if self.collection_menu:\n self.collection_menu.destroy()\n\n self.collection_menu = _CollectionEditorMenu(collection=collection_attr)\n self.collection_menu.pack(fill=BOTH, expand=TRUE, padx=10, pady=10)", "def set_as_feedback(self):\n if self.type == MessageTypes.AGENT:\n raise InvalidMessageTypeError(\n 'Cannot set feedback as True when msg is of type Agent')\n self.feedback = True", "def delete_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `delete`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='SimpleResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def delete_collection(self, collection_name, collection_locator, is_cluster):\n print(f\"Deleting {collection_name} collection started \\n\")\n self.navbar_goto(\"collections\")\n\n try:\n self.locator_finder_by_xpath(collection_locator).click()\n\n # we don't care about the cluster specific things:\n self.select_settings_tab(is_cluster)\n self.select_delete_collection()\n\n print(f\"Deleting {collection_name} collection Completed \\n\")\n self.webdriver.refresh()\n except TimeoutException:\n print(\"TimeoutException occurred! \\n\")\n print(\"Info: Collection has already been deleted or never created. \\n\")\n except NoSuchElementException:\n print('Element not found, which might be happen due to force cleanup.')\n except Exception as ex:\n traceback.print_exc()\n raise Exception(\"Critical Error occurred and need manual inspection!! 
\\n\") from ex\n self.webdriver.refresh()", "def setDeletesThreshold(self, v):\n return self._set(deletesThreshold=v)", "def test_DELETE_feedback(self):\n\t\t# 1\n\t\tfeedback_ids = []\n\t\tfor i in range(4):\n\t\t\tdata = self.POST_feedback()\n\t\t\tfeedback_ids.append(data['_id'])\n\t\t# 2\n\t\tfor i in range(len(feedback_ids)):\n\t\t\tfeedback_id = feedback_ids[i]\n\t\t\tself.DELETE('/api/feedback/' + feedback_id)\n\n\t\t\tdata = self.GET_data('/api/feedback/search')\n\t\t\tself.assertEqual(len(feedback_ids) - i - 1, len(data))\n\n\t\t\tlist = self.GET_data('/api/list/' + self.list_id)\n\t\t\tself.assertEqual(len(feedback_ids) - i - 1, len(list['feedbacks']))", "def collection_value_field(self, collection_value_field):\n\n self._collection_value_field = collection_value_field", "def _reset_collection(self):\r\n\r\n self._meta.queryset._document._collection = None\r\n self._meta.queryset._collection_obj = self._meta.queryset._document._get_collection()\r\n if hasattr(self._meta.queryset, '_reset_already_indexed'):\r\n self._meta.queryset._reset_already_indexed()", "def delete_feedback(feedback_id):\n\n feedback = Feedback.query.get_or_404(feedback_id)\n recipient = feedback.recipient\n \n db.session.delete(feedback)\n db.session.commit()\n\n return redirect(f'/users/{recipient}')", "def _fill_feedback(self):\n # To be overrided in child\n raise Exception(\"Must override in child.\")", "def delete(cls, collection, uid):\n result = collection.remove({\"_id\": cls.object_id(uid)})\n return result", "def delete_collection(self, collection_name: str):\n return self.http.collections_api.update_collections(\n storage_operations=StorageOperationsAnyOf2(\n delete_collection=collection_name\n )\n )", "def setCleanupTool(self,value):\n self.PDFreactorConfiguration.in1[\"cleanupTool\"] = value", "def collection_time(self, collection_time):\n\n self._collection_time = collection_time", "def reject(self, feedback=None):\n self.hit.generate_connection()\n self.hit.connection.reject_assignment(self.mturk_id, feedback=feedback)\n self.update()", "def team_members_id_design_comments_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_design_comments_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_design_comments_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_design_comments_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/designComments/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not 
header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_delete_collection(self):\n pass", "def device_stats_collection(self, device_stats_collection):\n\n self._device_stats_collection = device_stats_collection", "def delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")", "def delete_collection_with_http_info(self, bucket_id, collection_id, **kwargs):\n\n all_params = ['bucket_id', 'collection_id', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_collection\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `delete_collection`\")\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `delete_collection`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `delete_collection`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `delete_collection`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/collections/{collection_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'collection_id' in params:\n path_params['collection_id'] = params['collection_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # 
Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Deleted',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def autodelete(self, autodelete):\n \n self._autodelete = autodelete", "def add_feedback(self, feedback):\n self.feedback.append(feedback)\n if not isinstance(feedback.parent, (int, str)) and feedback.parent is not None:\n feedback.parent._get_child_feedback(feedback, True)\n self.execute_hooks('pedal.report', 'add_feedback', (feedback,))\n return feedback", "def test_delete_collection_cluster_policy_binding(self):\n pass", "def vertree_delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")", "def delete_record(self, collection_name, delete_condition):\n try:\n self.logger.info('in delete_record()')\n collection = self.get_db()[collection_name]\n collection.delete_one(delete_condition)\n self.logger.info('out delete_record()')\n except Exception as e:\n self.logger.error(f'Error occurred while deleting record {e}')", "def delete(self):\n if self.parent:\n assert isinstance(self.parent, Collection) # only know how to delete from Collection parents\n self.parent.delete_child(self)\n else:\n self._mark_deleted()", "def set_feedback_type(self, feedback_type):\r\n return self._arm.set_feedback_type(feedback_type)" ]
[ "0.4660153", "0.4479887", "0.44771156", "0.44402534", "0.44323143", "0.43931454", "0.42445672", "0.42226908", "0.42033428", "0.41435018", "0.4123928", "0.41066483", "0.40954313", "0.40865862", "0.40662232", "0.40604806", "0.4026898", "0.39949268", "0.39785284", "0.39696592", "0.39551842", "0.39518586", "0.39467153", "0.39351285", "0.39289364", "0.3918832", "0.39110917", "0.39097285", "0.39021352", "0.38971695" ]
0.70743954
0
Sets the receiver_group of this PersonDeleteOptions.
def receiver_group(self, receiver_group):
    self._receiver_group = receiver_group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_group(self, group):\n self._group = group", "def remove_from_group(self, org, contact, group):\n pass", "def set_group(self, group: str) -> None:\n self.group = group", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def group(self, group):\n self._group = group", "def setGroup(self, group):\n\t\tself.config.GROUP = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def set_receiver(self, receiver):\n self.receiver = receiver", "async def async_set_multiroom_group(self, multiroom_group):\n self._multiroom_group = multiroom_group", "def group(self, group):\n self.proxy_group = group\n return self", "def price_group(self, price_group: str):\n\n self._price_group = price_group", "def receiver(self, receiver: str):\n if receiver is None:\n raise ValueError(\"Invalid value for `receiver`, must not be `None`\") # noqa: E501\n\n self._receiver = receiver", "def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group", "def group_identifier(self, group_identifier):\n\n self._group_identifier = group_identifier", "def set_selected_group(self, group_id):\n self.contact_list = self.contacts_by_group_list[group_id - 1][1][1]\n\n\t# Return the contact list so far", "def detach_channel_group_from_port(self, group):\n ckresult(_dll.FMOD_System_DetachChannelGroupFromPort(self._ptr, group._ptr))", "def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self", "def set_service_group(self, service_group):\n self.single_selection_from_static_kendo_dropdown(self.service_group_kendo_dropdown_locator, service_group)", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def subject_group(self, subject_group):\n\n self._subject_group = subject_group", "def set_channel_group(self, channel_group):\n super().set_channel_group(channel_group)\n self.skip_flags = self.flagspace.all_flags() # everything but 0", "def destination_group_id(self, destination_group_id):\n\n self._destination_group_id = destination_group_id", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self" ]
[ "0.56648105", "0.5629727", "0.55333155", "0.54876494", "0.545211", "0.54422307", "0.5429853", "0.5429853", "0.5429853", "0.5285243", "0.5226676", "0.5157121", "0.5043886", "0.5042069", "0.4996538", "0.49878687", "0.4980231", "0.4943608", "0.4893715", "0.4893672", "0.48786703", "0.48786703", "0.48786703", "0.48786703", "0.48786703", "0.48786703", "0.48279238", "0.47584528", "0.4728097", "0.4702777" ]
0.73670137
0
Validate the identifier against the prefix's pattern, if it exists.
def validate(prefix: str, identifier: str) -> Optional[bool]:
    pattern = get_pattern_re(prefix)
    if pattern is None:
        return None
    return bool(pattern.match(normalize_identifier(prefix, identifier)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_identifier(self, identifier):\n pass", "def _validate_identifier(self, identifier):\n for c in identifier:\n if c not in string.letters + string.digits + '_':\n return False\n return True", "def validate_kf_id(kf_id, prefix='TA'):\n if len(kf_id) != 11 or kf_id[:3] != prefix+'_':\n abort(400, f\"'{kf_id}' is not a valid kf_id\")", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def stillLookingForPrefix(self, prefix):\n return prefix in self._prefixToIdentifiers", "def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )", "def is_pattern_prefix(\n self, pipeline: str, label: str, prefix: str\n ) -> bool:", "def __validate_col_prefix(col, prefixes, input_name):\n\n missing = [not col.startswith(p) for p in prefixes]\n if all(missing):\n msg = (\"Input {0} column {1!r} does not start with a valid \"\n \"prefix: {2!r}. Please ensure that the {0} column \"\n \"names specify the correct resource prefix.\")\n e = msg.format(input_name, col, prefixes)\n logger.error(e)\n raise InputError(e)", "def check_prefix(custom_str: str) -> bool:\r\n\r\n return len(custom_str) == 0", "def IsValidIdentifier(string):\n counter = 0\n if string in keyword.kwlist:\n feedback = (False, \"Invalid: can't use a keyword as your identifier!\")\n return feedback\n if not (string[0].isalpha() or string[0] == \"_\"):\n feedback = (False, \"Invalid: first character must be alphabetic or underscore!\")\n return feedback\n for letter in string[1:]:\n counter += 1\n if not (letter.isalnum() or letter == \"_\"):\n screen_out = \"Invalid: character '%s' at index %d!\" % (letter, counter)\n feedback = (False, screen_out)\n return feedback\n return (True, \"Valid!\")", "def check_valid_ip_prefix(value):\n prefix = int(value)\n if prefix < 0 or prefix > 32:\n raise argparse.ArgumentTypeError('{} is an invalid IPv4 prefix'.format(prefix))\n return prefix", "def hasNonstandardIdentifierBeginningWith(self, *args):\n return _libsbml.SBase_hasNonstandardIdentifierBeginningWith(self, *args)", "def id_check(employee_id):\r\n# badge_pattern = re.compile('[A-Za-z]{2}-\\d{4}')\r\n# re.search(badge_pattern, employee_id)\r\n\r\n # if statement\r\n if not re.match('[A-Z]{2}-\\d{4}', employee_id):\r\n print(employee_id, 'is not a valid ID.')", "def hasIdentifierBeginningWith(self, *args):\n return _libsbml.SBasePlugin_hasIdentifierBeginningWith(self, *args)", "def startsWith(self, prefix):\n pointer = self.tries\n for i in range(len(prefix)):\n ascii = ord(prefix[i]) - ord('a')\n if pointer[ascii] == None:\n return False\n pointer = pointer[ascii]\n return True", "def validate_cid_regex(cid: str) -> None:\n if not re.match(fr\"^{compound_settings.PREFIX}CID\\d0\\d+$\", cid):\n raise ValidationError(\n f\"Invalid format. 
Expected {compound_settings.PREFIX}CID$0######.\"\n )", "def validate_individual_identifiers(identifier: str, cpf: bool = True) -> bool:\n identifier = re.sub(r\"\\-|\\.|/\", \"\", identifier)\n dv = identifier[:-2]\n\n CPF_WEIGHTS = (11, 10, 9, 8, 7, 6, 5, 4, 3, 2)\n CNPJ_WEIGHTS = (6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2)\n\n if cpf:\n check = calculate_id_digit(numbers=dv, weights=CPF_WEIGHTS[1:])\n check = calculate_id_digit(numbers=check, weights=CPF_WEIGHTS)\n else:\n check = calculate_id_digit(numbers=dv, weights=CNPJ_WEIGHTS[1:])\n check = calculate_id_digit(numbers=check, weights=CNPJ_WEIGHTS)\n\n return identifier == check", "def match(self, encoded):\n encoded = check_unicode(encoded)\n return encoded.startswith(self.PREFIX)", "def is_identifier(s, dotted=False, prefix=False):\n if not isinstance(s, six.string_types):\n raise TypeError(\"is_identifier(): expected a string; got a %s\"\n % (type(s).__name__,))\n if six.PY3:\n if prefix:\n return is_identifier(s + '_', dotted=dotted, prefix=False)\n if dotted:\n return all(is_identifier(w, dotted=False) for w in s.split('.'))\n return s.isidentifier() and not _my_iskeyword(s)\n\n if prefix:\n if not s:\n return True\n if dotted:\n return bool(\n _dotted_name_prefix_re.match(s) and\n not any(_my_iskeyword(w) for w in s.split(\".\")[:-1]))\n else:\n return bool(_name_re.match(s))\n else:\n if dotted:\n # Use a regular expression that works for dotted names. (As an\n # alternate implementation, one could imagine calling\n # all(is_identifier(w) for w in s.split(\".\")). We don't do that\n # because s could be a long text string.)\n return bool(\n _dotted_name_re.match(s) and\n not any(_my_iskeyword(w) for w in s.split(\".\")))\n else:\n return bool(_name_re.match(s) and not _my_iskeyword(s))", "def validated_name(cls, name):\n if (name[:5] == 'hive-'\n and name[5] in ['1', '2', '3']\n and re.match(r'^hive-[123]\\d{4,6}$', name)):\n return name\n return None", "def autoprefix(prefix):\n pl = len(prefix)\n msg = '%%(s)r: expected some name after %(prefix)r!' % locals()\n def checker(s):\n if s.startswith(prefix):\n tail = s[pl:]\n if tail:\n return prefix + dotted_name(tail)\n else:\n raise ValueError(msg % locals())\n elif s:\n return prefix + dotted_name(s)\n else:\n return ''\n return checker", "def normalize_identifier(prefix: str, identifier: str) -> str:\n # A \"banana\" is an embedded prefix that isn't actually part of the identifier.\n # Usually this corresponds to the prefix itself, with some specific stylization\n # such as in the case of FBbt. The banana does NOT include a colon \":\" at the end\n banana = get_banana(prefix)\n if banana:\n banana = f\"{banana}:\"\n if not identifier.startswith(banana):\n return f\"{banana}{identifier}\"\n # Handle when the namespace is in the LUI, but no specific banana\n # has been given. 
This is most common for OBO Foundry ontologies'\n # identifiers, like CHEBI:XXXX\n elif namespace_in_lui(prefix):\n banana = f\"{prefix.upper()}:\"\n if not identifier.startswith(banana):\n return f\"{banana}{identifier}\"\n\n # TODO Unnecessary redundant prefix?\n # elif identifier.lower().startswith(f'{prefix}:'):\n #\n\n return identifier", "def test_bad_uuid_lowercase():\n bad_uiid_lower = \"7cfb2470-b600-4eb3-a2cd-c1439e45b91g\"\n m = CannedRe.UUID.match(bad_uiid_lower)\n assert m is None, \"Canned RegEx uiid test succeeded for %s while it should not\" % bad_uiid_lower", "def _check_valid_namespace_identifier(self, identifier: Union[str, Identifier]) -> Identifier:\n identifier_tuple = Catalog.identifier_to_tuple(identifier)\n if len(identifier_tuple) < 1:\n raise NoSuchNamespaceError(f\"Empty namespace identifier: {identifier}\")\n return identifier_tuple", "def test_kf_id():\n\n for _ in range(1000):\n prefix = ''.join(random.sample(string.ascii_uppercase, 2))\n kf_id = kf_id_generator(prefix)()\n assert kf_id[:2] == prefix\n assert len(kf_id) == 11\n assert kf_id[2] == '_'\n\n assert 'I' not in kf_id[2:]\n assert 'L' not in kf_id[2:]\n assert 'O' not in kf_id[2:]\n assert 'U' not in kf_id[2:]\n\n assert re.search(r'^'+prefix+r'_[A-HJ-KM-NP-TV-Z0-9]{8}', kf_id)", "def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))", "def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))", "def is_prefix(prefix: str, word: str):\n return word.startswith(prefix)", "def validate_uuid(self, uuid):\n match = re.match(\n r'([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)',\n uuid\n )\n if match:\n return True\n\n return False", "def is_prefix(uri1,uri2):\n if uri2[:len(uri1)]==uri1:\n return 1\n else:\n return None" ]
[ "0.7408097", "0.7396331", "0.6896986", "0.6826547", "0.64809126", "0.6387843", "0.63224006", "0.6290734", "0.6247484", "0.6160539", "0.61340904", "0.61145294", "0.605325", "0.6044549", "0.6044547", "0.60234606", "0.6018778", "0.5991968", "0.59837234", "0.59392786", "0.5905442", "0.58974856", "0.5888531", "0.588397", "0.5854059", "0.5838473", "0.5838473", "0.5836314", "0.5786293", "0.5763229" ]
0.8497021
0
Normalize the identifier with the appropriate banana.
def normalize_identifier(prefix: str, identifier: str) -> str:
    # A "banana" is an embedded prefix that isn't actually part of the identifier.
    # Usually this corresponds to the prefix itself, with some specific stylization
    # such as in the case of FBbt. The banana does NOT include a colon ":" at the end
    banana = get_banana(prefix)
    if banana:
        banana = f"{banana}:"
        if not identifier.startswith(banana):
            return f"{banana}{identifier}"
    # Handle when the namespace is in the LUI, but no specific banana
    # has been given. This is most common for OBO Foundry ontologies'
    # identifiers, like CHEBI:XXXX
    elif namespace_in_lui(prefix):
        banana = f"{prefix.upper()}:"
        if not identifier.startswith(banana):
            return f"{banana}{identifier}"
    # TODO Unnecessary redundant prefix?
    # elif identifier.lower().startswith(f'{prefix}:'):
    #
    return identifier
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def normalize(val):\n \n if val.find('-') != -1:\n val = val.replace('-','_')\n\n return val", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name", "def normalize(item):\n item = item.lower().strip().rstrip('_')\n return item", "def normalize(self, s):\n s = normalizing_regexp.sub('_', s)\n if s[0:1] in string.digits:\n s = '_' + s\n return s", "def normalize_bridge_id(bridge_id: str):\n bridge_id = bridge_id.lower()\n\n # zeroconf: properties['id'], field contains semicolons after each 2 char\n if len(bridge_id) == 17 and sum(True for c in \"aa:bb:cc:dd:ee:ff\" if c == \":\"):\n return bridge_id.replace(\":\", \"\")\n\n # nupnp: contains 4 extra characters in the middle: \"fffe\"\n if len(bridge_id) == 16 and bridge_id[6:10] == \"fffe\":\n return bridge_id[0:6] + bridge_id[-6:]\n\n # SSDP/UPNP and Hue Bridge API contains right ID.\n if len(bridge_id) == 12:\n return bridge_id\n\n logging.getLogger(__name__).warn(\"Received unexpected bridge id: %s\", bridge_id)\n\n return bridge_id", "def normalize_label(label):\n label = normalize('NFKD', label)\n label = re.sub('/[^a-z0-9-_:.]/g', '-', label)\n label = label.lower()\n return label", "def _clean_id(self, dirty_id):\n return self.wsid_regex.sub(\"\", dirty_id.replace(\" \", \"_\"))", "def scrub_gene_id(the_id):\n the_id = re.sub(r'(.*)\\.([0-9]{1,2})$', r'\\1_AT\\2', the_id)\n the_id = re.sub(r'\\W', r'_', the_id)\n return the_id", "def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)", "def clean_identifier(self, identifier):\n if isinstance(identifier, list):\n return '%2F'.join(identifier)\n else:\n return identifier", "def normalize_ads(val):\n m = ads_regexp.match(val)\n return m.group(2)", "def sanitize_id (self, id):\n return re.sub (self.sanitize_pat, '', id)", "def normalize_isbn(val):\n if is_isbn10(val):\n val = isbnlib.to_isbn13(val)\n return isbnlib.mask(isbnlib.canonical(val))", "def normalize(self):\n normalized = self.all_details.get('normalized', '')\n if normalized:\n return normalized\n\n if self.is_digit():\n self.all_details['normalized'] = 'Numeric'\n elif self.is_uuid():\n self.all_details['normalized'] = 'UUID'\n elif self.is_gibberish():\n self.all_details['normalized'] = 'Gibberish'\n else:\n for nr in self.normalized_regex_list:\n regex = nr['regex']\n groups = r'{}'.format(nr['groups'])\n ua = regex.sub(groups, self.user_agent)\n if ua != self.user_agent:\n self.all_details['normalized'] = ua\n break\n else:\n self.all_details['normalized'] = ''\n\n return self.all_details['normalized']", "def normalize_name(name):\n return PUNCT_RE.sub('-', name.lower()).strip('-')", "def normalize_osd_id(osd_id):\n if not isinstance(osd_id, str) or not osd_id.startswith('osd.'):\n osd_id = 'osd.' 
+ str(osd_id)\n return osd_id", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def clean_abbreviations(x):\n # a few entries in Revenue were nan\n if pd.isnull(x):\n return np.nan\n elif 'K' in x:\n return int(float(x[:-1]) * 1e3)\n elif 'M' in x:\n return int(float(x[:-1]) * 1e6)\n elif 'B' in x:\n return int(float(x[:-1]) * 1e9)\n else:\n return int(x)", "def normalize(self, name):\n\n\t\t# label emojis, specifically :) and :( as @artist, then apply \n\t\t# base normalization\n\n\t\tname = super().normalize(re.sub(r'\\s*:[\\(\\)]\\s*',' @artist ', name))\n\t\t\n\t\t# if now name is ? it may be an artist, so label as @artist\n\t\tif name.strip() in {'?','...'}:\n\t\t\treturn '@artist'\n\t\t\n\t\t# fix ! - remove if at the end of a word, otherwise replace with i\n\t\tname = re.sub(r'\\!+$','', re.sub(r'\\!+(?=[^\\b\\w])','', name)).replace('!','i')\n\t\t\n\t\t# remove the and a\n\t\tname = re.sub(r'^(the|a)\\s+','', name)\n\t\t \n\t\t# remove multiple white spaces\n\t\tname = re.sub(r'\\s{2,}', ' ', name).strip()\n\t\t\n\t\treturn name", "def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()", "def _normalize_show_name(name):\n\tname = name.casefold()\n\tname = re.sub(\"[^a-z0-9]\", \" \", name)\n\tname = re.sub(\"_\", \" \", name)\n\tname = re.sub(\"season \\d( part \\d)?\", \" \", name)\n\tname = re.sub(\"\\s+\", \" \", name)\n\treturn name", "def normalize_uri(uri):\n if isinstance(uri, str):\n uri = uri.decode('utf-8')\n return uri.strip().replace(u' ', u'_')", "def normalize_name(self, value):\n import unicodedata\n import re\n\n self.log('Converting string %s' % value)\n \n # Double try in name conversion\n try:\n value = unicodedata.normalize('NFKD', u'%s' % value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n except:\n self.log('Conversion error: \\n%s' % traceback.format_exc())\n\n value = unicode(value, 'ascii', errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n\n\n self.log('Conversion finished to %s' % value)\n\n return value", "def normalize_name(word):\n return word.strip(\"0123456789!@#$%^&*_() +=\\/?<>,.`~;:\").lower().replace(\" \",\"_\")", "def normalize(address):\n replacement = re.sub('\\W+', SEPARATOR, address.lower())\n\n processed = []\n for p in replacement.split(SEPARATOR):\n if not p:\n continue\n\n if p in ABBRS:\n processed.append(ABBRS[p])\n else:\n processed.append(p)\n\n processed.sort()\n\n normalized = SEPARATOR.join(processed)\n return normalized", "def _normalize_name(self, name, aggressive=False):\n stopwords = 'the', 'a'\n if aggressive:\n # Remove anything in brackets.\n name = re.sub(r'\\([^)]+\\)', '', name)\n # Some shows have a \"with Firstname Lastname\" suffix, like \"The Daily Show\n # with Jon Stewart\". 
Strip this out.\n # FIXME: hardcoded English\n name = re.sub(r'with +\\w+ +\\w+\\b', '', name)\n\n # Replace & with 'and' and remove other non-word characters\n name = re.sub(r'\\W', ' ', name.replace('&', 'and').replace('.', '').lower())\n # Remove stop words and remove whitespace.\n return remove_stop_words(name).replace(' ', '')", "def _transform_identifier(self, identifier, scheme):\n urlize = self.context.get(\"urlize_identifiers\", True)\n prefix_scheme = self.context.get(\"prefix_identifier_schemes\", True)\n result = None\n\n if urlize:\n result = idutils.to_url(identifier, scheme, url_scheme=\"https\")\n\n if not result and prefix_scheme and not identifier.startswith(scheme):\n result = f\"{scheme}:{identifier}\"\n\n return result or identifier", "def normalize_address(address):\n # Fix 'Place/Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]/[a-zA-Z0-9]', address):\n address = address.replace('/', ' & ')\n # Fix 'Place:Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]:[a-zA-Z0-9]', address):\n address = address.replace(':', ' & ')\n # Fix 'RD' -> 'Rd' & 'PK' -> 'Pk'\n if re.findall(r'[PRSA][KDTV]', address):\n address = re.sub(r'([PRSA][KDTV])', \\\n lambda x: x.group(0).title(), address)\n # Fix 'Bl' -> 'Blvd'\n if re.findall(r'(Bl)[\\ ]', address):\n address = address.replace('Bl', 'Blvd')\n # Fix 'w 156th' -> 'W 156th'\n if re.findall(r'[^a-zA-Z][wnse][/ ]', address):\n address = re.sub(r'[^a-zA-Z]([wnse])[/ ]', \\\n lambda x: x.group(0).upper(), address)\n # Fix '151 St' -> '151st St'\n if re.findall(r'[0-9][\\ ][SA][tv]', address):\n address = re.sub(r'[0-9]+', \\\n ordinal_conversion, address)\n return address", "def _normalize(self, metric_name, submit_method, prefix):\n metric_prefix = \"mongodb.\" if not prefix else \"mongodb.{0}.\".format(prefix)\n metric_suffix = \"ps\" if submit_method == RATE else \"\"\n\n # Replace case-sensitive metric name characters\n for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():\n metric_name = re.compile(pattern).sub(repl, metric_name)\n\n # Normalize, and wrap\n return u\"{metric_prefix}{normalized_metric_name}{metric_suffix}\".format(\n normalized_metric_name=self.normalize(metric_name.lower()),\n metric_prefix=metric_prefix, metric_suffix=metric_suffix\n )" ]
[ "0.62498116", "0.6103178", "0.6090855", "0.60531324", "0.5999573", "0.59399766", "0.5901326", "0.58349013", "0.5803965", "0.580082", "0.57906383", "0.5772368", "0.5770924", "0.5757673", "0.57162386", "0.5711377", "0.5693682", "0.5654112", "0.5643497", "0.56413555", "0.5633282", "0.5616629", "0.5613404", "0.558698", "0.55735785", "0.55268764", "0.55144596", "0.5502588", "0.55013376", "0.5491795" ]
0.71285367
0
Get the identifiers.org URL for the given CURIE.
def get_identifiers_org_url(prefix: str, identifier: str) -> Optional[str]:
    curie = get_identifiers_org_curie(prefix, identifier)
    if curie is None:
        return None
    return f"https://identifiers.org/{curie}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_url_from_doi(doi):\n\n try:\n r = requests.head(f\"https://doi.org/{doi}\", allow_redirects=True)\n except requests.exceptions.ConnectionError:\n return None\n\n return r.url", "def get_Curie_ref(self, val):\n if len(val) == 0:\n return URIRef(self.base)\n elif val[0] == \"[\":\n if val[-1] == \"]\":\n curie = val[1:-1]\n # A possible Blank node reference should be separated here:\n if len(curie) >= 2 and curie[0] == \"_\" and curie[1] == \":\":\n return _get_bnode_from_Curie(curie[2:])\n else:\n return self.get_resource(val[1:-1])\n else:\n # illegal CURIE...\n self.options.comment_graph.add_error(\"Illegal CURIE: %s\" % val)\n return None\n else:\n # check the value, to see if an error may have been made...\n # Usual protocol values in the URI\n v = val.strip().lower()\n protocol = urlparse.urlparse(val)[0]\n if protocol != \"\" and protocol not in usual_protocols:\n err = \"Possible URI error with '%s'; the intention may have been to use a protected CURIE\" % val\n self.options.comment_graph.add_warning(err)\n return self.get_URI_ref(val)", "def _construct_ebi_taxon_url(self, org):\n split = self._parse_organism_name(org)\n return \"http://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/scientific-name/{}%20{}\".format(split[0], split[1])", "def CURIE_to_URI(self, val) :\n\t\t# Just to be on the safe side:\n\t\tif val == \"\" :\n\t\t\treturn None\n\t\telif val == \":\" :\n\t\t\tif self.default_curie_uri :\n\t\t\t\treturn URIRef(self.default_curie_uri)\n\t\t\telse :\n\t\t\t\treturn None\n\n\t\t# See if this is indeed a valid CURIE, ie, it can be split by a colon\n\t\tcurie_split = val.split(':',1)\n\t\tif len(curie_split) == 1 :\n\t\t\t# there is no ':' character in the string, ie, it is not a valid CURIE\n\t\t\treturn None\n\t\telse :\n\t\t\tif self.state.rdfa_version >= \"1.1\" :\n\t\t\t\tprefix\t= curie_split[0].lower()\n\t\t\telse :\n\t\t\t\tprefix\t= curie_split[0]\n\t\t\treference = curie_split[1]\n\n\t\t\t#if len(reference) > 0 :\n\t\t\t#\tif self.state.rdfa_version >= \"1.1\" and (len(prefix) == 0 or prefix in self.ns) and reference.startswith('//') :\n\t\t\t#\t\t# This has been defined as illegal in RDFa 1.1\n\t\t\t#\t\tself.state.options.add_warning(err_absolute_reference % (reference, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\t#\t\treturn None\n\t\t\t#\tif reference[0] == \":\" :\n\t\t\t#\t\treturn None\n\n\t\t\t# first possibility: empty prefix\n\t\t\tif len(prefix) == 0 :\n\t\t\t\tif self.default_curie_uri and self._check_reference(reference) :\n\t\t\t\t\treturn self.default_curie_uri[reference]\n\t\t\t\telse :\n\t\t\t\t\treturn None\n\t\t\telse :\n\t\t\t\t# prefix is non-empty; can be a bnode\n\t\t\t\tif prefix == \"_\" :\n\t\t\t\t\t# yep, BNode processing. 
There is a difference whether the reference is empty or not...\n\t\t\t\t\tif len(reference) == 0 :\n\t\t\t\t\t\treturn _empty_bnode\n\t\t\t\t\telse :\n\t\t\t\t\t\t# see if this variable has been used before for a BNode\n\t\t\t\t\t\tif reference in _bnodes :\n\t\t\t\t\t\t\treturn _bnodes[reference]\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t# a new bnode...\n\t\t\t\t\t\t\tretval = BNode()\n\t\t\t\t\t\t\t_bnodes[reference] = retval\n\t\t\t\t\t\t\treturn retval\n\t\t\t\t# check if the prefix is a valid NCNAME\n\t\t\t\telif ncname.match(prefix) :\n\t\t\t\t\t# see if there is a binding for this:\n\t\t\t\t\tif prefix in self.ns and self._check_reference(reference) :\n\t\t\t\t\t\t# yep, a binding has been defined!\n\t\t\t\t\t\tif len(reference) == 0 :\n\t\t\t\t\t\t\treturn URIRef(str(self.ns[prefix]))\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\treturn self.ns[prefix][reference]\n\t\t\t\t\telif prefix in self.default_prefixes and self._check_reference(reference) :\n\t\t\t\t\t\t# this has been defined through the default context\n\t\t\t\t\t\tif len(reference) == 0 :\n\t\t\t\t\t\t\treturn URIRef(str(self.default_prefixes[prefix][0]))\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t(ns,used) = self.default_prefixes[prefix]\n\t\t\t\t\t\t\t# lazy binding of prefixes (to avoid unnecessary prefix definitions in the serializations at the end...)\n\t\t\t\t\t\t\tif not used :\n\t\t\t\t\t\t\t\tself.graph.bind(prefix,ns)\n\t\t\t\t\t\t\t\tself.default_prefixes[prefix] = (ns,True)\n\t\t\t\t\t\t\treturn ns[reference]\n\t\t\t\t\telse :\n\t\t\t\t\t\t# no definition for this thing...\n\t\t\t\t\t\treturn None\n\t\t\t\telse :\n\t\t\t\t\treturn None", "def make_openid_url( email ):\n return os.path.join( CONFIG.SYNDICATE_OPENID_TRUSTROOT, \"id\", email )", "def resolveDoi(doi):\n logging.debug('Resolving DOI %s' % doi)\n doiUrl = 'https://doi.org/' + urllib.quote(doi.encode('utf8'))\n page = httpGetDelay(doiUrl)\n trgUrl = page['url']\n logging.debug('DOI %s redirects to %s' % (doi, trgUrl))\n return trgUrl", "def geo_url(self):\n from geoid.acs import AcsGeoid\n\n us = tiger_url(self.year, self.summary_level, AcsGeoid.parse(self.geoid).stusab)\n\n return parse_app_url(us)", "def inspire_url(self) -> str:\n return f\"https://inspirehep.net/literature/{self.inspirehep_id}\"", "def doi_to_url(doi, plos_network=False):\n URL_TMP = INT_URL_TMP if plos_network else EXT_URL_TMP\n return URL_TMP.format(doi)", "def url(self):\n if self.term_type != 'C':\n url_fmt = self.path_level_url_fmt\n url_info = {'id': self.term_type}\n else:\n url_fmt = self.obj_level_url_fmt\n url_info = {'org_prefix': self.org_prefix, 'id': self.term_id}\n\n return url_fmt % url_info", "def url(self):\n scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)\n url = six.moves.urllib.parse.urlunsplit((\n scheme, netloc, path + '.dods',\n self.id + hyperslab(self.slice) + '&' +\n '&'.join(self.selection), fragment)).rstrip('&')\n\n return url", "def get_inspire_url(data):\n url = \"\"\n if \"bai\" in data and data[\"bai\"]:\n url = \"http://inspirehep.net/author/profile/\" + data[\"bai\"]\n elif \"recid\" in data and data[\"recid\"]:\n url = \"http://inspirehep.net/record/\" + str(data[\"recid\"])\n else:\n url = \"http://inspirehep.net/hepnames\"\n return url", "def _get_bioregistry_link(prefix: str, identifier: str) -> Optional[str]:\n norm_prefix, norm_identifier = normalize_curie(prefix, identifier)\n if norm_prefix is None:\n return None\n return f\"{BIOREGISTRY_REMOTE_URL.rstrip()}/{norm_prefix}:{norm_identifier}\"", "def url_HITRANCIA():\n 
url=u\"https://hitran.org/data/CIA/\"\n return url", "def request_uri(self, identifier):\n path = self.PATH_TEMPLATE % (identifier, identifier)\n return self.api_baseurl + path", "def url(self):\n\n if not hasattr(self, \"_url\"):\n query = db.Query(\"query_term u\", \"u.value\")\n query.join(\"query_term t\", \"t.doc_id = u.doc_id\")\n query.where(f\"u.path = '{self.URL_PATH}'\")\n query.where(f\"t.path = '{self.TERM_PATH}'\")\n query.where(query.Condition(\"t.int_val\", self.id))\n rows = query.execute(self.loader.cdr_cursor).fetchall()\n self._url = rows[0].value if rows else \"\"\n return self._url", "def get_randori_base_url(self) -> str:\n return urljoin(self.endpoint_url, self.organization_name)", "def url_ExoMol():\n url=u\"http://www.exomol.com/db/\"\n return url", "def url_to_doi(url):\n return url[url.index(prefix):].rstrip(url_suffix).rstrip(INT_URL_SUFFIX)", "def get_issuer(site_url=None, request=None):\n site_url = get_site_url(site_url=site_url, request=request)\n path = reverse('oidc_provider:provider-info') \\\n .split('/.well-known/openid-configuration')[0]\n issuer = site_url + path\n\n return str(issuer)", "def gen_site_id(self, request):\n # originally, sep. based on scheme + host.\n audience = request.params.get('audience', '')\n if \"http:\" in audience:\n return urlparse.urlparse(audience).netloc\n else:\n if len(audience):\n return audience\n return ''", "def get_internal_url(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return \"http://{}:8008\".format(ip)\n return \"http://{}:8008\".format(fqdn)", "def get_URI_ref(self, val):\n if val == \"\":\n return URIRef(self.base)\n elif val[0] == '[' and val[-1] == ']':\n self.options.comment_graph.add_error(\"Illegal usage of CURIE: %s\" % val)\n return None\n else:\n return URIRef(urlparse.urljoin(self.base, val))", "def url(self):\r\n course_key = \"slashes:{course_org}+{course_num}+{course_run}\".format(**self.course_info)\r\n return \"/\".join([BASE_URL, self.url_path, course_key])", "def url_base():\n return \"https://dev-yourOrg.us.auth0.com\"", "def url(self):\n\n if self.identifier and self.identifier != \"\":\n return self.collection.url + self.identifier + \"/\"\n else:\n return self.collection.url", "def extract_doi_links(urls):\n doi_urls = [url for url in urls if \"/doi/\" in url]\n if len(doi_urls) > 0:\n return (\"http://dx.doi.org\" +\n doi_urls[0][doi_urls[0].find(\"/doi/\") + 4:])\n else:\n return None", "def loginurl(request, response):\n from google.appengine.api import users as gusers\n urls = {}\n for p in openIdProviders:\n p_name = p.split('.')[-2]\n p_url = p.lower()\n try:\n url = gusers.create_login_url(federated_identity=p_url)\n if not url: url = create_openid_url(p_url)\n except TypeError: continue\n urls[p_name] = url\n return urls", "def make_url(realm_url, endpoint):\n return \"{}/protocol/openid-connect/{}\".format(realm_url, endpoint)", "def get_url(self):\n return self.url.format(\n base_url=self.base_url,\n description=urllib.quote_plus(self.description),\n location=urllib.quote_plus(self.location),\n )" ]
[ "0.61636496", "0.60958976", "0.60149693", "0.59665805", "0.57917076", "0.5781677", "0.56911445", "0.5682871", "0.56211597", "0.5576372", "0.5564913", "0.55424225", "0.5527563", "0.5491155", "0.5478088", "0.541651", "0.5405972", "0.54037833", "0.5390824", "0.5363278", "0.5360469", "0.5351684", "0.53510815", "0.53004557", "0.52962744", "0.52955604", "0.5271427", "0.52548635", "0.52520555", "0.5250561" ]
0.70760375
0
Get the bioregistry link.
def _get_bioregistry_link(prefix: str, identifier: str) -> Optional[str]:
    norm_prefix, norm_identifier = normalize_curie(prefix, identifier)
    if norm_prefix is None:
        return None
    return f"{BIOREGISTRY_REMOTE_URL.rstrip()}/{norm_prefix}:{norm_identifier}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_registry(self) -> None:\n\n arr = self.load_links()[0]\n for i,v in enumerate(arr):\n print(f\"<{i}: {v}>\\n\")\n pass", "def getLink(self):", "def get_link(prefix: str, identifier: str, use_bioregistry_io: bool = True) -> Optional[str]:\n providers = get_providers(prefix, identifier)\n for key in LINK_PRIORITY:\n if not use_bioregistry_io and key == \"bioregistry\":\n continue\n if key not in providers:\n continue\n rv = providers[key]\n if rv is not None:\n return rv\n return None", "def registry_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"registry_url\")", "def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")", "def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")", "def link(self):\n return self.container['link']", "def getLink(self):\n return self.link", "def link(self):\n return self._link", "def link(self):\n return self._link", "def link(self):\n return self._link", "def get_link(self, conf, link_id):\n\t\tpass", "def get_image_registry_url(self, image_name):\n c = self._oc_command([\"get\", \"is\", image_name,\n \"--output=jsonpath=\\'{ .status.dockerImageRepository }\\'\"])\n try:\n internal_registry_name = run_cmd(c, return_output=True)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"oc get is failed: %s\" % ex)\n\n logger.info(\"Image registry url: %s\", internal_registry_name)\n\n return internal_registry_name.replace(\"'\", \"\").replace('\"', '')", "def link(self):\n return 'http://{}:{}'.format(self.basic_url, self.port)", "def spinnaker_link_id(self):\n return self._spinnaker_link_id", "def get_link(self, name):\n return self._link_reg[name]", "def link(self):\n\n return self._get_field(\"link\")", "def self_link(self):\n return self._json['coredata'].get('link', [])[0].get('@href')", "def getIdLink(self):\n return self.urlLink()", "def registry_id(self) -> str:\n return self._registry_id", "def _link(self):\n return self._interface(self.fspath)", "def href(self):\n return self._href", "def href(self):\n return self._href", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")", "def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")" ]
[ "0.6344005", "0.6297033", "0.62138337", "0.6201464", "0.61723423", "0.61723423", "0.61248446", "0.61228025", "0.6117944", "0.6117944", "0.6117944", "0.5885266", "0.5865561", "0.577505", "0.57633257", "0.5762611", "0.57493734", "0.5675951", "0.56756413", "0.5662293", "0.56369156", "0.5631585", "0.5631585", "0.5622859", "0.5622859", "0.5622859", "0.5622859", "0.5622859", "0.5622859", "0.5622859" ]
0.744591
0
Get the URL to resolve the given prefix/identifier pair with the given resolver.
def get_registry_resolve_url(metaprefix: str, prefix: str, identifier: str) -> Optional[str]:
    providers = get_providers(prefix, identifier)
    if not providers:
        return None
    return providers.get(metaprefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_identifiers_org_url(prefix: str, identifier: str) -> Optional[str]:\n curie = get_identifiers_org_curie(prefix, identifier)\n if curie is None:\n return None\n return f\"https://identifiers.org/{curie}\"", "def resolveUri(self, *args):\n return _libsbml.SBMLResolverRegistry_resolveUri(self, *args)", "def _get_bioregistry_link(prefix: str, identifier: str) -> Optional[str]:\n norm_prefix, norm_identifier = normalize_curie(prefix, identifier)\n if norm_prefix is None:\n return None\n return f\"{BIOREGISTRY_REMOTE_URL.rstrip()}/{norm_prefix}:{norm_identifier}\"", "def get_resolver_endpoint(ResolverEndpointId=None):\n pass", "def resolveUri(self, *args):\n return _libsbml.SBMLResolver_resolveUri(self, *args)", "def resolve_url(url, redirects):\n s = url.find(':')\n if s < 0:\n return url\n scheme, rest = url[:s], url[s+1:]\n if scheme in redirects:\n root = redirects[scheme]\n elif scheme in REPO_ROOTS:\n root = REPO_ROOTS[scheme]\n else:\n return url\n root = root.rstrip('/')\n rest = rest.lstrip('/')\n return '/'.join([root, rest])", "def get_resolver_rule(ResolverRuleId=None):\n pass", "def request_uri(self, identifier):\n path = self.PATH_TEMPLATE % (identifier, identifier)\n return self.api_baseurl + path", "def get_n2t_url(prefix: str, identifier: str) -> Optional[str]:\n n2t_prefix = get_n2t_prefix(prefix)\n if n2t_prefix is None:\n return None\n curie = f\"{n2t_prefix}:{identifier}\"\n if curie is None:\n return None\n return f\"https://n2t.net/{curie}\"", "def resolver_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"resolver_endpoint_id\")", "def resolve_id(resolver, identifier, name='object'):\r\n ids = resolver(identifier)\r\n\r\n if len(ids) == 0:\r\n raise CLIAbort(\"Error: Unable to find %s '%s'\" % (name, identifier))\r\n\r\n if len(ids) > 1:\r\n raise CLIAbort(\r\n \"Error: Multiple %s found for '%s': %s\" %\r\n (name, identifier, ', '.join([str(_id) for _id in ids])))\r\n\r\n return ids[0]", "def resolver_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resolver_endpoint_id\")", "def _transform_identifier(self, identifier, scheme):\n urlize = self.context.get(\"urlize_identifiers\", True)\n prefix_scheme = self.context.get(\"prefix_identifier_schemes\", True)\n result = None\n\n if urlize:\n result = idutils.to_url(identifier, scheme, url_scheme=\"https\")\n\n if not result and prefix_scheme and not identifier.startswith(scheme):\n result = f\"{scheme}:{identifier}\"\n\n return result or identifier", "def get_urlresolver():\n\n try:\n from django.core import urlresolvers\n return urlresolvers\n except ImportError:\n from django import urls\n return urls", "def resolveUri(self, *args):\n return _libsbml.SBMLFileResolver_resolveUri(self, *args)", "def get_url(self, **kwargs):\n\n return build(\n self._request.path,\n self._request.GET,\n self._meta.prefix,\n **kwargs )", "def lookup_url_by_uid(self, uid):\n return self.get_by_uid(uid).get('@id')", "def get_prefix_url(request):", "def as_url(cls, api=None, name_prefix='', url_prefix=''):\r\n url_prefix = url_prefix and \"%s/\" % url_prefix\r\n name_prefix = name_prefix and \"%s-\" % name_prefix\r\n\r\n url_regex = '^%s%s/?$' % (\r\n url_prefix, cls._meta.url_regex.lstrip('^').rstrip('/$'))\r\n url_regex = url_regex.replace('//', '/')\r\n url_name = '%s%s' % (name_prefix, cls._meta.url_name)\r\n\r\n return url(url_regex, cls.as_view(api=api), name=url_name)", "def get_lookup_url(self, resource_obj=None, **kwargs):\n\n return 
self._generate_url(resource_obj=resource_obj, **kwargs)", "def getQualifiedURL(uri = None):\n schema, stdport = ('http', '80')\n host = os.environ.get('HTTP_HOST')\n if not host:\n host = os.environ.get('SERVER_NAME')\n port = os.environ.get('SERVER_PORT', '80')\n if port != stdport: host = host + \":\" + port\n result = \"%s://%s\" % (schema, host)\n if uri: result = result + uri\n return result", "def get_default_url(prefix: str, identifier: str) -> Optional[str]:\n entry = get_resource(prefix)\n if entry is None:\n return None\n return entry.get_default_url(identifier)", "def _lookup_url(self, endpoint, values):\r\n try:\r\n cont = self.get_container(values['container'])\r\n if cont.cdn_enabled:\r\n return \"%s/%s\" % (cont.cdn_uri, values['filename'])\r\n else:\r\n return None\r\n except: # pragma: no cover\r\n return None", "def url_for(self, *args, **kwargs):\n return yarl.URL(self.url(parts=kwargs))", "def get_obofoundry_link(prefix: str, identifier: str) -> Optional[str]:\n fmt = get_obofoundry_format(prefix)\n if fmt is None:\n return None\n return f\"{fmt}{identifier}\"", "def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)", "def get_resolver_xml_url(dataset_url):\n root = get_element_root_from_url(dataset_url)\n latest = ''\n latest_base = ''\n latest_url = None\n for s in root.findall(xmlns_prefix + 'service'):\n if (s.get(\"serviceType\") == \"Resolver\"):\n latest = s.get(\"name\")\n latest_base = s.get(\"base\")\n ds = find_dataset(root,latest)\n if (ds is not None):\n # TODO: generalize this to handle relatives paths starting with a '/' \n latest_url = get_url_path(dataset_url) + latest_base + '/' + ds.get('urlPath')\n return latest_url", "def resolved_url(self):\n # '{year}/{release}-Year/csv_{record_type}(state}.zip'\n us = self.url_proto.format(year=self._year, release=self._release,\n record_type=self.record_type.lower(), state = self._state.lower())\n\n return parse_app_url(us)", "def get_url(self, routename, **kargs):\r\n scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'\r\n location = self.router.build(routename, **kargs).lstrip('/')\r\n return urljoin(urljoin('/', scriptname), location)", "def expand_uri(id: str, cmaps: Optional[List[PREFIX_MAP]] = None, strict: bool = False) -> str:\n try:\n prefix, localid = id.split(\":\", 1)\n except ValueError:\n if strict:\n raise InvalidSyntax(id) from None\n else:\n return id\n\n if cmaps is None:\n uri = default_converter.expand(curie=id)\n if uri is not None:\n return uri\n elif strict:\n raise NoExpansion(prefix, localid)\n else:\n return id\n\n for cmap in cmaps:\n if prefix in cmap:\n return cmap[prefix] + localid\n if strict:\n raise NoExpansion(prefix, localid)\n else:\n return id" ]
[ "0.6380464", "0.6088858", "0.60886234", "0.6022632", "0.5849159", "0.5730095", "0.56223696", "0.55889964", "0.55541784", "0.55472934", "0.5524634", "0.5510377", "0.5502343", "0.543467", "0.54266346", "0.53655595", "0.5344931", "0.5324596", "0.53097606", "0.5302649", "0.5294215", "0.5292815", "0.5292759", "0.52755576", "0.5247757", "0.5241616", "0.5230274", "0.51842284", "0.5175127", "0.5158373" ]
0.700943
0
Initialize types global variables with common types.
def initialize_types():
    global VOID, VOID_P, VOID_PP
    global CHAR, CHAR_P, CHAR_PP
    global INT, INT_P, INT_108A
    global ULONG, UINT
    VOID = gdb.lookup_type("void")
    VOID_P = VOID.pointer()
    VOID_PP = VOID_P.pointer()
    CHAR = gdb.lookup_type("char")
    CHAR_P = CHAR.pointer()
    CHAR_PP = CHAR_P.pointer()
    INT = gdb.lookup_type("int")
    INT_P = INT.pointer()
    INT_108A = INT.array(108)
    UINT = gdb.lookup_type("unsigned int")
    ULONG = gdb.lookup_type("unsigned long")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self): \n self.types = {}", "def __set_utils_types(self):\n self.__arrayt = type(self.c_byte * 1)\n # self.__cfuncptrt = type(type(self.memmove))\n # class _p(self.Structure):\n # pass\n # self.__ptrt = type(self.POINTER(_p))\n self.__basic_types_name = {\n 'c_bool': '?',\n 'c_char': 'c',\n 'c_byte': 'b',\n 'c_ubyte': 'B',\n 'c_short': 'h',\n 'c_ushort': 'H',\n 'c_int': 'i', # c_int is c_long\n 'c_uint': 'I',\n 'int': 'i',\n 'c_longlong': 'q',\n 'c_ulonglong': 'Q',\n 'c_float': 'f',\n 'c_double': 'd',\n 'c_longdouble': 'g',\n 'c_char_p': 's',\n 'c_void_p': 'P',\n # 'c_void': 'P', ## void in array is void_p ##DEBUG\n }\n if self.__longsize == 4:\n # long == int\n self.__basic_types_name.update({'c_long': 'i',\n 'c_ulong': 'I',\n 'long': 'i',\n 'c_void': 'I'})\n elif self.__longsize == 8:\n # long == longlong\n self.__basic_types_name.update({'c_long': 'q',\n 'c_ulong': 'Q',\n 'long': 'q',\n 'c_void': 'Q'})\n # we need to account for the possible changes in c_longdouble\n self.__basic_types = set([getattr(self, k) for k in self.__basic_types_name.keys() if hasattr(self, k)])\n return", "def _initialize_builtins():\n # type: () -> None\n cast_map = (\n (a, b)\n for a in (str, int, float, bool)\n for b in (str, int, float, bool)\n )\n for source, target in cast_map:\n add_conversion(1, source, (), target, (), target)\n\n # TODO: Consider support for more generic types. So conversions can happen\n # within container types. eg convert List[str] to List[int]", "def __init__(self):\n\n # Dictionary of types seen so far. Builtin types always available.\n # Values : list of constructors which the type defines\n # This is a smartdict, so keys can be retrieved.\n self.knownTypes = smartdict.Smartdict()\n for typecon in ast.builtin_types_map.values():\n self.knownTypes[typecon()] = None\n\n # Dictionary of constructors encountered so far.\n # Value: Type which the constructor produces.\n # This is a smartdict, so keys can be retrieved.\n self.knownConstructors = smartdict.Smartdict()", "def _types(cls):\n return {}", "def init_locals(self):\n pass", "def __init__(self):\n # just the list of class/construct types\n self.lut = {}\n self.lut[\"struct\"] = structure\n self.lut[\"typedef\"] = typedef\n self.lut[\"define\"] = define\n self.lut[\"enum\"] = enum\n self.lut[\"enumEntry\"] = enumEntry\n self.lut[\"ifdef\"] = ifdef\n self.lut[\"ifndef\"] = ifndef\n self.lut[\"hashIf\"] = hashIf\n self.lut[\"hashElse\"] = hashElse\n self.lut[\"hashElif\"] = hashElif\n self.lut[\"endif\"] = endif\n self.lut[\"banner\"] = banner\n self.lut[\"general\"] = general\n self.lut[\"listDefine\"] = listDefine\n self.lut[\"listEntry\"] = listEntry\n self.lut[\"listNumEls\"] = listNumEls\n self.lut[\"union\"] = union\n\n # and the dictionary of all symbols we declare\n self.symbols = {}", "def ioc(globals):\n\tfrom Module.Shapes.ShapeFactory import shape_factory\n\tglobals['shape_factory'] = shape_factory\n\tfrom Module.Lighting.Colors import Colors\n\tglobals['Colors'] = Colors", "def __init__(self, type_):\n\n self.type = type_", "def init_vars(self):\n # type: () -> None\n raise NotImplementedError", "def __init__(self):\n self.instantiable = {self: self}\n self.is_generic = False", "def reset():\n global typeMap, declList\n typeMap = {}\n declList = builtins[:]\n for t in builtins: typeMap[t.identifier] = t", "def initGlobals():\n libxml2mod.xmlInitGlobals()", "def __init__(self):\n FooBar = None\n Foo = None\n FOO = None\n foo_bar = None", "def initialize(cls):", "def init_elect_types(self):\n self.wta = 
WinnerTakeAll()\n self.proportional = Proportional()\n self.schulze = Schulze()\n\n session.add_all([self.wta, self.proportional, self.schulze])", "def _addGlobals(self, globalsDict):\n globalsDict['obj'] = None\n globalsDict['role'] = None\n globalsDict['pyatspi'] = pyatspi", "def __init__(self, type=np.float64):\n self._inst = None\n self._type = type", "def __init__(self):\n thisType = type(self)\n if not thisType._initialized:\n thisType._initialized = True\n self._embedded_device_registry = {}\n self._root_device_registry = {}\n self._service_registry = {}\n self._scan_for_device_extensions_under_code_container(dynamic_extensions)\n self._scan_for_device_extensions_under_code_container(standard_extensions)\n self._scan_for_service_extensions_under_code_container(dynamic_extensions)\n self._scan_for_service_extensions_under_code_container(standard_extensions)\n return", "def __init__(self, rouge_types):\n\n self.rouge_types = rouge_types", "def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n try:\n self.fast_validate = CoercableTypes[aType]\n except:\n self.fast_validate = (11, aType)", "def typedefs(self):\n raise exceptions.NotImplementedError()", "def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in [ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)", "def __init__(self):\n super(Modules, self).__init__()\n \n global superclasses\n superclasses['universe'] = []\n superclasses['actions'] = ['universe']\n superclasses['booleans'] = ['universe']\n\n global instances\n instances['universe'] = set()\n instances['actions'] = set()\n instances['booleans'] = set()", "def __init__(self):\n self.func = {\n \"str\": DataType.str,\n \"int\": DataType.int,\n \"float\": DataType.float,\n \"duration\": DataType.duration,\n \"datetime\": DataType.datetime,\n }", "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def base_type_dict():\n return {'filter' : filters.Filter,\n 'global_options' : global_options.GlobalOptions,\n 'input_device' : input_devices.InputDevice,\n 'input_stream' : input_streams.InputStream,\n 'output_device' : output_devices.OutputDevice,\n 'output_stream' : output_streams.OutputStream}", "def __init__(self, type, value):\r\n self._type = type\r\n self._value = value", "def ntypes(self): # -> None:\n ...", "def _get_types(self):\n\n db = Database()\n self.c_built_ins = list(map(lambda tup: tup[0], db.select_built_types()))\n self.c_built_in_array_types = r'^(' + '|'.join(self.escaped(self.c_built_ins)) + ')\\[[0-9]*\\]'\n self.c_types = list(map(lambda 
tup: tup[0], db.select_types()))\n self.c_array_types = r'^(' + '|'.join(self.escaped(self.c_types)) + ')\\[[0-9]*\\]'\n db.close_connection()" ]
[ "0.65056497", "0.6477415", "0.63417953", "0.62999207", "0.62689316", "0.6072707", "0.60454255", "0.60141075", "0.5958679", "0.59225327", "0.58071923", "0.57830304", "0.5762805", "0.5757923", "0.57541144", "0.57219446", "0.5718443", "0.56720936", "0.56538254", "0.5637154", "0.5634583", "0.5592444", "0.559125", "0.5585815", "0.55721617", "0.5566556", "0.5565012", "0.5556359", "0.5546221", "0.5543951" ]
0.7307148
0
Reads the memory location `where` with type `ttype`.
def read_value(where, ttype=None):
    ttype = VOID_P if ttype is None else ttype
    frame = gdb.selected_frame()
    if where.startswith("$"):
        return frame.read_register(where[1:]).cast(ttype)
    else:
        to_parse = "(%s) %s" % (str(ttype), where)
        return gdb.parse_and_eval(to_parse)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, where):\n return self._data[where]", "def fetch_where(self, tablename, where):\n\n if type(where) != str:\n raise NotAStringError(\"please provide a valid where clause\")\n\n query = 'select * from ' + tablename + ' where ' + where\n\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n\n fetcheddata = self.__cur.fetchall()\n fetcheddata = self.__helper._functions__rowtodict(fetcheddata)\n return fetcheddata", "def read_verify_location(self, param_type, path, pool=None, cont=None):\n self.run_ior_with_params(param_type, path, pool, cont,\n self.test_file, self.ior_flags[1])", "def _extract_where(self, query) :\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# discard the insert information\n\t\tif self.n.sparql.insert in query :\n\t\t\tdel query[self.n.sparql.insert]\n\t\t\n\t\t# discard the delete information\n\t\tif self.n.sparql.delete in query :\n\t\t\tdel query[self.n.sparql.delete]\n\t\t\n\t\t# build the where clause with outlined variables\n\t\treturn self.python_to_SPARQL_long(query)", "def read_data(self, loc):\n pass", "def convert_where(g, op, block):\n\n condition = g.get_node(op.input(\"Condition\")[0])\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n out = _op.where(condition, x, y)\n g.add_node(op.output(\"Out\")[0], out)", "def read_type(adr, tyname):\n data = Rsp.read_mem(adr,tyname2size(tyname))\n data = bytes.fromhex(data)\n res, = struct.unpack(tyname2fmt(tyname), data)\n return(res)", "def locateObjLocation(data, questionDict, questionIdict):\n where = questionDict['where']\n for t in range(data.shape[0] - 1):\n if data[t, 0] == where:\n for u in range(t + 1, data.shape[0]):\n word = questionIdict[data[u, 0] - 1]\n lexname = lookupLexname(word)\n if (lexname is not None and \\\n lexname.startswith('noun')) or \\\n (lexname is None):\n return data[u, 0]\n print 'not found'\n return data[-1, 0]", "def read_memory(self, address):\n return self.memory[Vm.filter_mem_address(address)]", "def from_position(tu, file, line, column):\r\n return conf.lib.clang_getLocation(tu, file, line, column)", "def where(self, **where):\n\n where = _handle_where(where)\n return self._post(\"\", Table, **where)", "def read_data(self, cond=None):\n data, last, _id, user, dest, valid = self.tdata.read_data(cond)\n return data, last, _id, user, dest, valid", "def convert_where_index(g, op, block):\n\n condition = g.get_node(op.input(\"Condition\")[0])\n out = _op.argwhere(condition)\n g.add_node(op.output(\"Out\")[0], out)", "def read_memory(self, address):\n\n return self.memory[address]", "def peek (self, where=0) :\r\n if (where<0 or where>=len(self)) :\r\n m = \"Trying to peek beyond the end of the Circ. 
Buff\"\r\n raise Exception(m)\r\n index = (self.nextGet_+where) % self.capacity()\r\n return self.buff_[index]", "def getLineInformation(line):\n \n pass", "def search_memory_of_types(self, pattern, *memtypes):\n inferior = gdb.selected_inferior()\n locations = []\n for memrange in self.ranges:\n if memrange.memtype not in memtypes:\n continue\n\n loc = ctypes.c_void_p(memrange.startaddr).value\n end = ctypes.c_void_p(memrange.endaddr).value\n while loc < end:\n loc = inferior.search_memory(loc, end - loc, pattern)\n if loc is None or loc == 0:\n loc = end\n else:\n locations.append(loc)\n loc += size_t.sizeof\n\n return locations", "def ReadProcessMemory(\n process_handle: int,\n address: int,\n pytype: Type[T],\n bufflength: int\n) -> T:\n if pytype not in [bool, int, float, str, bytes]:\n raise ValueError(\"The type must be bool, int, float, str or bytes.\")\n\n data = get_c_type_of(pytype, bufflength)\n kernel32.ReadProcessMemory(process_handle, ctypes.c_void_p(address), ctypes.byref(data), bufflength, None)\n\n return str(data.value) if pytype is str else data.value", "def readQueryFromFile(location):\n try:\n queryFileHandler = open(location, \"r\")\n except EnvironmentError as e:\n errMsg = (\"Error reading the SMT query from the file \"\n \"located at %s: %s\" % (location, e))\n raise GameTimeError(errMsg)\n else:\n with queryFileHandler:\n return Query(queryFileHandler.read())", "def read_data(self, cond=None):\n ready = make_condition(cond)\n val = 1 if ready is None else ready\n\n _connect_ready(self.tready._get_module(), self.tready, val)\n\n data = self.tdata\n valid = self.tvalid\n last = self.tlast\n _id = self.tid\n user = self.tuser\n dest = self.tdest\n\n return data, last, _id, user, dest, valid", "def read_ptr(self, offset):\n return self.read_int64(offset)", "def Get_value_from_memory(self, address, type):\r\n\r\n\r\n if (address < len(self.Memory_data_segment)) and (address + type <= len(self.Memory_data_segment)):\r\n\r\n ret = \"\"\r\n i = address\r\n while i < (address + type):\r\n ret = self.Memory_data_segment[i] + ret\r\n i += 1\r\n return int(ret, 16)\r\n else:\r\n self.State=\"RTE\"\r\n return False", "def getTuple(n,type=\"R\",thing = \"T\"):\r\n if type == \"R\":\r\n n=n+\".root\"\r\n print \"getting file \"+n\r\n \r\n file=TFile(n)\r\n t=file.Get(thing)\r\n if type==\"X\":\r\n translate(n)\r\n t,file=getTuple(n,\"R\")\r\n return t,file", "def read(self, addr):\n if addr < len(self.RAM):\n return self.RAM[addr]", "def _read_var(self, time_idx, pressure_idx, var_idx, lat_idx, lng_idx):\n offset = self.item_size * (\n time_idx * self.t_idx + pressure_idx * self.p_idx +\n var_idx * self.v_idx + lat_idx * self.l_idx + lng_idx)\n self.mm.seek(offset)\n return self.unpacker.unpack(self.mm.read(self.item_size))[0]", "def getDataAtLocation(loc: ghidra.program.util.ProgramLocation) -> ghidra.program.model.listing.Data:\n ...", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def loadpts_traj(tnum, skip=40, filt=None):\n pts = []\n print('loading file: ', tnum)\n traj = md.load(DCD_PROT(tnum), top=PDB_PROT, stride=skip)\n if filt is not None:\n traj.atom_slice(filt, inplace=True)\n return traj.xyz" ]
[ "0.5448398", "0.53106546", "0.48611066", "0.48344597", "0.48211852", "0.47786808", "0.47259092", "0.45408067", "0.4456974", "0.44333473", "0.44250152", "0.43972963", "0.43949172", "0.43939638", "0.43215302", "0.43211624", "0.43104756", "0.43070924", "0.42990926", "0.42651215", "0.42546505", "0.42517266", "0.42435992", "0.42390233", "0.42332464", "0.42304924", "0.42232284", "0.42232284", "0.42232284", "0.42122325" ]
0.6839989
0
Given a height along the line of sight, the distance to the observation window, and the solar radius, return the coronal density based on streamer data from Limb.
def getDensity(h, R_w, R_sun):
    # k is a fitting constant
    R = np.sqrt(R_w**2+h**2)
    r = R/R_sun  # units need to be in solar radii
    a = 77.1
    b = 31.4
    c = 0.954
    d = 8.30
    e = 0.550
    f = 4.63
    return (a*r**(-b) + c*r**(-d) + e*r**(-f))*10**8  # [cm-3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_densities(self, r):\n # Handle scalar r values as single element array\n if not np.isscalar(r):\n r = np.array((r))\n\n assert np.min(r) > 0, \"Error: distances must be non-zero and positive.\"\n\n if self.s_exp is not None:\n oS, doS, ddoS = self.get_orbitals(self.s_n, self.s_exp, self.s_coef, r)\n\n den_0 = np.sum(self.s_occ[0][:,None]*oS**2, axis=0)\n den_1 = np.sum(self.s_occ[1][:,None]*oS**2, axis=0)\n\n grd_0 = np.sum(self.s_occ[0][:,None]*(oS*doS), axis=0)\n grd_1 = np.sum(self.s_occ[1][:,None]*(oS*doS), axis=0)\n\n tau_0 = np.sum(self.s_occ[0][:,None]*doS**2, axis=0)\n tau_1 = np.sum(self.s_occ[1][:,None]*doS**2, axis=0)\n\n lap_s = oS*ddoS + doS**2 + 2*oS*doS/r\n lap_0 = np.sum(self.s_occ[0][:,None]*lap_s, axis=0)\n lap_1 = np.sum(self.s_occ[1][:,None]*lap_s, axis=0)\n else:\n # Otherwise supply zeros in place\n den_0 = np.zeros(r.shape)\n den_1 = np.zeros(r.shape)\n\n grd_0 = np.zeros(r.shape)\n grd_1 = np.zeros(r.shape)\n\n tau_0 = np.zeros(r.shape)\n tau_1 = np.zeros(r.shape)\n\n lap_0 = np.zeros(r.shape)\n lap_1 = np.zeros(r.shape)\n\n # Check if atom has occupied P orbitals\n if self.p_exp is not None:\n oP, doP, ddoP = self.get_orbitals(self.p_n, self.p_exp, self.p_coef, r)\n\n den_0 += np.sum(self.p_occ[0][:,None]*oP**2, axis=0)\n den_1 += np.sum(self.p_occ[1][:,None]*oP**2, axis=0)\n\n grd_0 += np.sum(self.p_occ[0][:,None]*oP*doP, axis=0)\n grd_1 += np.sum(self.p_occ[1][:,None]*oP*doP, axis=0)\n\n tau_0 += np.sum(self.p_occ[0][:,None]*(doP**2 + 2*(oP/r)**2), axis=0)\n tau_1 += np.sum(self.p_occ[1][:,None]*(doP**2 + 2*(oP/r)**2), axis=0)\n\n lap_p = oP*ddoP + doP**2 + 2*oP*doP/r\n lap_0 += np.sum(self.p_occ[0][:,None]*lap_p, axis=0)\n lap_1 += np.sum(self.p_occ[1][:,None]*lap_p, axis=0)\n\n # Check if atom has occupied D orbitals\n if self.d_exp is not None:\n oD, doD, ddoD = self.get_orbitals(self.d_n, self.d_exp, self.d_coef, r)\n den_0 += np.sum(self.d_occ[0][:,None]*oD**2, axis=0)\n den_1 += np.sum(self.d_occ[1][:,None]*oD**2, axis=0)\n\n grd_0 += np.sum(self.d_occ[0][:,None]*oD*doD, axis=0)\n grd_1 += np.sum(self.d_occ[1][:,None]*oD*doD, axis=0)\n\n tau_0 += np.sum(self.d_occ[0][:,None]*(doD**2 + 6*(oD/r)**2), axis=0)\n tau_1 += np.sum(self.d_occ[1][:,None]*(doD**2 + 6*(oD/r)**2), axis=0)\n\n lap_d = oD*ddoD + doD**2 + 2*oD*doD/r\n lap_0 += np.sum(self.d_occ[0][:,None]*lap_d, axis=0)\n lap_1 += np.sum(self.d_occ[1][:,None]*lap_d, axis=0)\n\n # Check if atom has occupied F orbitals\n if self.f_exp is not None:\n oF, doF, ddoF = self.get_orbitals(self.f_n, self.f_exp, self.f_coef, r)\n den_0 += np.sum(self.f_occ[0][:,None]*oF**2, axis=0)\n den_1 += np.sum(self.f_occ[1][:,None]*oF**2, axis=0)\n\n grd_0 += np.sum(self.f_occ[0][:,None]*oF*doF, axis=0)\n grd_1 += np.sum(self.f_occ[1][:,None]*oF*doF, axis=0)\n\n tau_0 += np.sum(self.f_occ[0][:,None]*(doF**2 + 12*(oF/r)**2), axis=0)\n tau_1 += np.sum(self.f_occ[1][:,None]*(doF**2 + 12*(oF/r)**2), axis=0)\n\n lap_f = oF*ddoF + doF**2 + 2*oF*doF/r\n lap_0 += np.sum(self.f_occ[0][:,None]*lap_f, axis=0)\n lap_1 += np.sum(self.f_occ[1][:,None]*lap_f, axis=0)\n\n # Take care of scaling\n den_0 /= 4*pi\n den_1 /= 4*pi\n\n grd_0 /= 2*pi\n grd_1 /= 2*pi\n\n tau_0 /= 8*pi\n tau_1 /= 8*pi\n\n lap_0 /= 2*pi\n lap_1 /= 2*pi\n\n return den_0, den_1, grd_0, grd_1, tau_0, tau_1, lap_0, lap_1", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def _calculate_clim_data(day_of_year, fourier_coeffs):\n # Convert day_of_year into radians.\n rads = (day_of_year * 2 * 
math.pi) / 365\n clim_data = fourier_coeffs[0].data + (2 * \\\n (fourier_coeffs[1].data*math.cos(rads) + \\\n fourier_coeffs[2].data*math.cos(2*rads) + \\\n fourier_coeffs[3].data*math.cos(3*rads) + \\\n fourier_coeffs[4].data*math.cos(4*rads) - \\\n fourier_coeffs[5].data*math.sin(rads) - \\\n fourier_coeffs[6].data*math.sin(2*rads) - \\\n fourier_coeffs[7].data*math.sin(3*rads) - \\\n fourier_coeffs[8].data*math.sin(4*rads)))\n \n return clim_data", "def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def dp_radius(self, s, survey='SPIRE_500'):\n shape = np.array(s[survey].shape)\n cosPA, sinPA = np.cos(s['PA_RAD']), np.sin(s['PA_RAD'])\n cosINCL = s['cosINCL']\n w = s[survey + '_WCS']\n xcm, ycm = s['RA_RAD'], s['DEC_RAD']\n dp_coords = np.zeros([shape[0], shape[1], 2])\n # Original coordinate is (y, x)\n # :1 --> x, RA --> the one needed to be divided by cos(incl)\n # :0 --> y, Dec\n dp_coords[:, :, 0], dp_coords[:, :, 1] = \\\n np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n # Now, value inside dp_coords is (x, y)\n # :0 --> x, RA --> the one needed to be divided by cos(incl)\n # :1 --> y, Dec\n for i in range(shape[0]):\n dp_coords[i] = Angle(w.wcs_pix2world(dp_coords[i], 1) * u.deg).rad\n dp_coords[:, :, 0] = 0.5 * (dp_coords[:, :, 0] - xcm) * \\\n (np.cos(dp_coords[:, :, 1]) + np.cos(ycm))\n dp_coords[:, :, 1] -= ycm\n # Now, dp_coords is (dx, dy) in the original coordinate\n # cosPA*dy-sinPA*dx is new y\n # cosPA*dx+sinPA*dy is new x\n if survey[:5] == 'GALEX':\n return np.sqrt((cosPA * dp_coords[:, :, 1] +\n sinPA * dp_coords[:, :, 0])**2 +\n ((cosPA * dp_coords[:, :, 0] -\n sinPA * dp_coords[:, :, 1]))**2) * \\\n s['DIST_MPC'] * 1.0E3 # Radius in kpc\n else:\n return np.sqrt((cosPA * dp_coords[:, :, 1] +\n sinPA * dp_coords[:, :, 0])**2 +\n ((cosPA * dp_coords[:, :, 0] -\n sinPA * dp_coords[:, :, 1]) / cosINCL)**2) * \\\n s['DIST_MPC'] * 1.0E3 # Radius in kpc", "def heterodyne_corr(x,sr,f,maxwind=2**14,nhop=2**10,nper=3,dc_cut=50, release_partials=True):\n xx = x.copy()\n t = np.arange(len(x))/sr\n\n nharm = len(f)\n ret = []\n part = np.zeros((len(x),nharm))\n for ii,ff in enumerate(f):\n if ff==0.:\n nwind=maxwind\n foth = np.delete(f,ii)\n nwind = (sr/np.min(np.abs(foth-ff))*nper)\n print(nwind)\n hetsig = np.exp(1j*2*np.pi*ff*t)\n if release_partials:\n cc,ih = heterodyne(xx,hetsig,wind=np.hanning(nwind),hop=nhop)\n else:\n cc,ih = heterodyne(x,hetsig,wind=np.hanning(nwind),hop=nhop)\n if ff==0.:\n cc/=2\n th=ih/sr\n ret.append(ts.SampledTimeSeries(cc,th,label='%.2f'%ff))\n ret[-1].f = ff\n hf = np.interp(t,th,cc)\n xp = np.real(np.conjugate(hf)*hetsig)\n xx-=xp\n part[:,ii]=xp\n return ret,xx,part", "def coriolis(self, lat):\n return 2. 
* self.omega * np.sin(np.deg2rad(lat))", "def calc_kernel_influence(src_lon, src_lat, target_lon, target_lat, d):\n Cd = d / (2 * np.pi)\n kernel_exponent = 1.5\n delta_lon = src_lon - target_lon\n delta_lat = src_lat - target_lat\n distance = np.sqrt(np.power(delta_lon, 2) + np.power(delta_lat, 2))\n Kr = Cd / np.power(distance + d, kernel_exponent)\n return Kr", "def _calc_solar_from_clouds_and_angle(hr, ds_path):\n # Solar radiation [W/m^2] incident on top of atmosphere\n Q_o = 1368.0\n # Cloud model based on Dobson and Smith, table 5\n # SEA -- May 2010 : redid the cloud parametrization based on UBC\n # Solar data (/ocean/shared/SoG/met/solar/) fitting Q to cos_Z\n # (not Q/cos_Z as Kate did). Allen and Wolfe (2013). (0) no\n # clouds, (1) 1/10 cloud fraction (10) 100% clouds. Four sig\n # figs are what comes out of matlab but standard deviations are\n # 40W/m2 for low cloud fraction to 120 W/m2 for 6-9 cloud\n # fraction to 85 W/m2 for completely cloudy.\n cloud_consts = SimpleNamespace(\n A=numpy.array(\n [\n 0.6337,\n 0.6149,\n 0.5861,\n 0.5512,\n 0.5002,\n 0.4649,\n 0.4225,\n 0.3669,\n 0.2468,\n 0.1981,\n 0.0841,\n ]\n ),\n B=numpy.array(\n [\n 0.1959,\n 0.2119,\n 0.2400,\n 0.2859,\n 0.3192,\n 0.3356,\n 0.3339,\n 0.3490,\n 0.4427,\n 0.3116,\n 0.2283,\n ]\n ),\n )\n # Local standard time\n ## WARNING: .to(\"PST\") may be fragile and incorrect for summer-time dates\n lst = hr.to(\"PST\")\n # day_time is in seconds, LST\n day_time = (lst - lst.floor(\"day\")).seconds\n # hour of day as degrees from noon\n hour = (day_time / 3600 - 12) * 15\n # day is year-day\n day = (lst - lst.floor(\"year\")).days\n # solar declination [radians]\n declination = (\n 23.45 * numpy.pi / 180 * numpy.sin((284 + day) / 365.25 * 2 * numpy.pi)\n )\n # Latitude of approximate centre of model domain in radians\n lat = numpy.pi * 50 / 180\n # solar elevation\n elev_sin = numpy.sin(declination) * numpy.sin(lat)\n elev_cos = numpy.cos(declination) * numpy.cos(lat)\n cos_Z = elev_sin + elev_cos * numpy.cos(numpy.pi / 180 * hour)\n # cos of -hour_angle in radians\n hour_angle = numpy.tan(lat) * numpy.tan(declination)\n # assume we are south of the Arctic Circle\n day_length = numpy.arccos(-hour_angle) / 15 * 2 * 180 / numpy.pi\n sunrise = 12 - 0.5 * day_length # hours\n sunset = 12 + 0.5 * day_length # hours\n Qso = Q_o * (1 + 0.033 * numpy.cos(day / 365.25 * 2 * numpy.pi))\n with xarray.open_dataset(ds_path) as ds:\n cf_value = ds.percentcloud * 10\n fcf = numpy.floor(cf_value).astype(int) # integer below cf value\n fcf = xarray.where(fcf == 10, 9, fcf).data\n ccf = fcf + 1 # integer above cf value\n if (sunrise > day_time / 3600) or (day_time / 3600 > sunset):\n # nighttime\n return xarray.zeros_like(ds.percentcloud)\n return (\n Qso\n * (\n cloud_consts.A[fcf] * (ccf - cf_value)\n + cloud_consts.A[ccf] * (cf_value - fcf)\n + (\n cloud_consts.B[fcf] * (ccf - cf_value)\n + cloud_consts.B[ccf] * (cf_value - fcf)\n )\n * cos_Z\n )\n * cos_Z\n )", "def directivity(horn_width, horn_height, eplane_effective_length, hplane_effective_length, frequency):\n # Calculate the wavelength\n wavelength = c / frequency\n \n # Calculate the arguments for the Fresnel integrals\n u = 1.0 / sqrt(2.0) * (sqrt(wavelength * hplane_effective_length) / horn_width + \n horn_width / sqrt(wavelength * hplane_effective_length))\n\n v = 1.0 / sqrt(2.0) * (sqrt(wavelength * hplane_effective_length) / horn_width - horn_width /\n sqrt(wavelength * hplane_effective_length))\n\n # Calculate the Fresnel sin and cos integrals\n Su, Cu = fresnel(u)\n 
Sv, Cv = fresnel(v)\n\n arg = horn_height / sqrt(2.0 * wavelength * eplane_effective_length)\n\n S2, C2 = fresnel(arg)\n\n S2 *= S2\n C2 *= C2\n\n return 8.0 * pi * eplane_effective_length * hplane_effective_length / (horn_width * horn_height) * \\\n ((Cu - Cv) ** 2 + (Su - Sv) ** 2) * (C2 + S2)", "def measure_curvature(self, warped, leftx, rightx):\n\t\t# Define conversions in x and y from pixels space to meters\n\t\t#xm_per_pix = 3.7/warped.shape[1] # meters per pixel in x dimension\n\t\t#ym_per_pix = 30.0/warped.shape[0] # meters per pixel in y dimension\n\t\txm_per_pix = 3.7/700 # meters per pixel in x dimension\n\t\tym_per_pix = 30.0/720 # meters per pixel in y dimension\n\t\t# Generate some fake data to represent lane-line pixels\n\t\tploty = np.linspace(0, 719, num=720) # to cover same y-range as image\n\t\t# Fit second order polynomials to x, y in world space\n\t\tleft_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n\t\tright_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n\t\t# Define y-value where we want radius of curvature\n\t\t# Choose the maximum y-value, corresponding to the bottom of the image\n\t\ty_eval = np.max(ploty)\n\t\t# Calculate radius of fitted curvature\n\t\tleft_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])\n\t\tright_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])\n\t\t# Calculate the lane deviation\n\t\tlane_deviation = self.lane_deviation(warped, xm_per_pix)\n\n\t\treturn left_curverad, right_curverad, lane_deviation", "def clinopyroxene_92():\n\n rho = 3327.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 257.3; C[0,1] = 85.9; C[0,2] = 76.2; C[0,3] = 0.; C[0,4] = 7.1; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 216.2; C[1,2] = 71.8; C[1,3] = 0.; C[1,4] = 13.3; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 260.2; C[2,3] = 0.; C[2,4] = 33.7; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.2; C[3,4] = 0.; C[3,5] = 10.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 70.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 85.8\n\n return C, rho", "def clinopyroxene_98():\n\n rho = 3190.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 237.8; C[0,1] = 83.5; C[0,2] = 80.; C[0,3] = 0.; C[0,4] = 9.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 183.6; C[1,2] = 59.9; C[1,3] = 0.; C[1,4] = 9.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 229.5; C[2,3] = 0.; C[2,4] = 48.1; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 76.5; C[3,4] = 0.; C[3,5] = 8.4\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 73.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 81.6\n\n return C, rho", "def dline_dSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_run,nGal=p.nGal)\n \n marker = 'o'\n if p.sim_run == p.sim_runs[0]: marker = '^'\n\n L_line = getattr(GR,'L_'+p.line+'_sun')#[380:400]#[0:100]\n SFR = getattr(GR,'SFR')#[380:400]#[0:100]\n M_star = getattr(GR,'M_star')#[380:400]#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[380:400]#[0:100]\n R_gas = getattr(GR,'R2_gas')#[380:400]#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[380:400]#[0:100]\n\n SFR = SFR[L_line > 0]\n M_star = 
M_star[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n L_line = L_line[L_line > 0]\n print('%i data points ' % (len(L_line)))\n\n # Distance from MS\n dlSFR = aux.distance_from_salim18(GR.M_star,GR.SFR)\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n # Distance from observed relation\n L_obs,SFR_obs,fit,std = add_line_SFR_obs(p.line,[1e6,1e6],ax,plot=False,select=p.select)\n ldL_line = np.log10(L_line) - fit.predict(np.log10(SFR.reshape(-1, 1))).flatten()\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_ext':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'SIGAME v3',\\\n '_arepoPDF_CMZ':'SIGAME v3',\\\n '_arepoPDF_M51':'SIGAME v3'}\n lab = labs[p.table_ext]\n\n\n ax.text(0.05,0.9,p.line,transform=ax.transAxes,fontsize=13)\n ax.set_xlabel('log SFR - log SFR$_{MS,Salim+18}$')\n ax.set_ylabel('log L - log L$_{obs}$(SFR)')\n if not p.xlim: p.xlim = np.array([-3,3])\n if not p.ylim: \n p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 3]\n # if p.line == '[OI]63': p.ylim = [np.median(ldL_line) - 5,np.median(ldL_line) + 4]\n # if 'CO' in p.line: p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 4]\n\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.plot([0,0],ax.get_ylim(),'--k',lw=1)\n ax.plot(ax.get_xlim(),[0,0],'--k',lw=1)\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)/1e6 # per pc^-2\n m = ax.scatter(dlSFR[np.argsort(Sigma_M_H2)],ldL_line[np.argsort(Sigma_M_H2)],marker=marker,s=14,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=-2.5,vmax=2.2,label=lab,alpha=0.5,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/pc$^2$]',size=15)", "def light_measure_circle( data, weit_data, pix_size, cx, cy, R_bins, id_phy = False, z0 = None):\n\n\tNx = data.shape[1]\n\tNy = data.shape[0]\n\tx0 = np.linspace(0, Nx-1, Nx)\n\ty0 = np.linspace(0, Ny-1, Ny)\n\tpix_id = np.array(np.meshgrid(x0,y0))\n\n\t#..center pixel point\n\tdev_05_x = cx - np.int( cx )\n\tdev_05_y = cy - np.int( cy )\n\n\tif dev_05_x > 0.5:\n\t\txn = np.int( cx ) + 1\n\telse:\n\t\txn = np.int( cx )\n\n\tif dev_05_y > 0.5:\n\t\tyn = np.int( cy ) + 1\n\telse:\n\t\tyn = np.int( cy )\n\n\ttheta = np.arctan2((pix_id[1,:] - yn), (pix_id[0,:] - xn))\n\tchi = theta * 180 / np.pi\n\n\t# radius in unit of pixel number\n\trbin = R_bins.astype( int )\n\n\tN_bins = len( rbin )\n\n\tintens = np.zeros(N_bins, dtype = np.float)\n\tAngl_r = np.zeros(N_bins, dtype = np.float)\n\tN_pix = np.zeros(N_bins, dtype = np.float)\n\tnsum_ratio = np.zeros(N_bins, dtype = np.float)\n\n\tdr = np.sqrt(( pix_id[0] - xn )**2 + ( pix_id[1] - yn)**2)\n\n\tfor k in range( N_bins ):\n\n\t\tir = dr == rbin[ k ]\n\n\t\tbool_sum = np.sum(ir)\n\n\t\tif bool_sum == 0:\n\t\t\tAngl_r[k] = rbin[ k ] * pix_size\t\t\n\n\t\telse:\n\t\t\tweit_arr = weit_data[ir]\n\t\t\tsamp_flux = data[ir]\n\t\t\tsamp_chi = chi[ir]\n\n\t\t\ttot_flux = np.nansum(samp_flux * weit_arr) / np.nansum(weit_arr)\n\t\t\tidnn = np.isnan( samp_flux )\n\t\t\tN_pix[k] = np.sum( idnn == False )\n\t\t\tnsum_ratio[k] = np.nansum(weit_arr) / np.sum( idnn == False )\n\n\t\t\tintens[k] = tot_flux + 0.\n\t\t\tAngl_r[k] = np.nansum( dr[ir] * weit_arr ) / np.nansum( weit_arr ) * pix_size\n\n\tidzo = N_pix < 1\n\n\tIntns = intens.copy()\n\tIntns[idzo] = 0.\n\tnsum_ratio[idzo] = 0.\n\n\tIntns = Intns / pix_size**2\n\n\tif id_phy:\n\n\t\tDa0 = Test_model.angular_diameter_distance( z0 ).value ## in unit 'Mpc'\n\t\tphy_r = Angl_r * Da0 * 1e3 / 
rad2arcsec # in unit of kpc\n\t\treturn Intns, phy_r, N_pix, nsum_ratio\n\n\telse:\n\t\treturn Intns, Angl_r, N_pix, nsum_ratio", "def section_coordinates():\n \n gh_width = 30.0 # in feet\n gh_width_west = gh_width/2.0\n N_x = 100\n dx = gh_width_west/100.0\n gh_length = 48 # in feet\n \n xvalues = np.linspace(0,(N_x)*dx,N_x+1) # array for width\n yvalues = np.linspace(0,gh_length,num=gh_length+1) # array for height\n zvalues_west = np.zeros(N_x+1) # array for height\n \n for i in range(0,len(xvalues)):\n zvalues_west[i] = 7.29944696 + (1.27415518*xvalues[i]) + (-0.0680139854*xvalues[i]**2) + (0.00152035861*xvalues[i]**3)\n i += 1\n \n roof_slopes_west = np.zeros(N_x+1)\n roof_lengths = np.zeros(N_x+1)\n\n total_length_west = 0\n\n for i in range(1,len(xvalues)):\n dz = zvalues_west[i] - zvalues_west[i-1]\n roof_slopes_west[i] = dz/dx\n roof_lengths[i] = (dz**2 + dx**2)**0.5\n total_length_west += roof_lengths[i]\n \n zvalues_east = np.flip(zvalues_west, axis=0)\n zvalues_west = zvalues_west[:-1]\n zvalues = np.concatenate((zvalues_west, zvalues_east), axis=0)\n \n xx, yy = np.meshgrid(xvalues, yvalues) \n \n plt.plot(xx, yy, marker='.', color='k', linestyle='none')\n plt.axis('equal')\n plt.show() \n\n return roof_slopes_west", "def rolling_circular_stdev(\n data: np.ndarray, fps: int, time_windows: np.ndarray\n ) -> np.ndarray:\n\n data = np.deg2rad(data)\n results = np.full((data.shape[0], time_windows.shape[0]), 0.0)\n for time_window_cnt in prange(time_windows.shape[0]):\n window_size = int(time_windows[time_window_cnt] * fps)\n for window_end in prange(window_size, data.shape[0] + 1, 1):\n window_data = data[window_end - window_size : window_end]\n results[window_end - 1][time_window_cnt] = stats.circvar(window_data)\n return np.round(results, 4)", "def calc_length_distortion(self, x, y):\n\n # get the major axis of the used Earth ellipsoid\n ellaxis = Geodesic.WGS84.a\n\n # get the centre of the subgrid's projection\n fe = self.core.projection.osr_spref.GetProjParm('false_easting')\n fn = self.core.projection.osr_spref.GetProjParm('false_northing')\n\n # create the distances to the projection centre\n dists = np.sqrt((np.array(x) - fe)**2 + (np.array(y) - fn)**2)\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n #\n # is it just about the distance to the centre (c), and as are equally long\n # on the ellipsoid and on the projected plane (the core of of AEQD!)\n k = dists / ellaxis / np.sin(dists / ellaxis)\n\n return k", "def filter_2d(x, sdm, size):\n col_begin = np.max([1, x[1] - int(size / 2)]) - 1\n col_end = np.min([x[1] + int(size / 2), len(sdm)]) - 1\n row_begin = np.max([1, x[0] - int(size / 2)]) - 1\n row_end = np.min([x[0] + int(size / 2), len(sdm)]) - 1\n\n beats_count = np.min([row_end - row_begin, col_end - col_begin])\n\n area = sdm[row_begin : row_begin + beats_count, col_begin : col_begin + beats_count]\n\n # the main diagonal\n main_diag = np.diag(area, 0)\n\n # black diagonals\n diags = np.concatenate((main_diag, np.diag(area, -int(size / 2)), np.diag(area, int(size / 2))))\n\n _alpha = np.mean(diags)\n _beta = np.mean(main_diag)\n _lambda = (np.sum(area) - np.sum(diags)) / (beats_count * beats_count - len(diags))\n\n rho_alpha = _alpha / _lambda\n rho_bera = _beta / _lambda\n\n return rho_alpha, rho_bera", "def radarScat(sp, wl, K2=0.93):\n#TODO check if K2 is for ice or liquid!\n prefactor = 2*np.pi*wl**4/(np.pi**5*K2)\n \n \n reflect_hh = prefactor*(sp.Z11+sp.Z22+sp.Z12+sp.Z21)\n reflect_vv = 
prefactor*(sp.Z11+sp.Z22-sp.Z12-sp.Z21)\n kdp = 1e-3*(180.0/np.pi)*wl*sp.S22r_S11r\n\n reflect_hv = prefactor*(sp.Z11 - sp.Z12 + sp.Z21 - sp.Z22)\n #reflect_vh = prefactor*(sp.Z11 + sp.Z12 - sp.Z21 - sp.Z22).values\n ldr_h = reflect_hh/reflect_hv\n \n # delta_hv np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])\n #a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2\n #b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n #c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])\n #rho_hv np.sqrt(a / (b*c))\n rho_hv = np.nan*np.ones_like(reflect_hh) # disable rho_hv for now\n #Ah = 4.343e-3 * 2 * scatterer.wavelength * sp.S22i.values # attenuation horizontal polarization\n #Av = 4.343e-3 * 2 * scatterer.wavelength * sp.S11i.values # attenuation vertical polarization\n\n #- test: calculate extinction: TODO: test Cextx that is given in DDA with this calculation.\n k = 2 * np.pi / (wl)\n cext_hh = sp.S22i*4.0*np.pi/k\n cext_vv = sp.S11i*4.0*np.pi/k\n \n return reflect_hh, reflect_vv, reflect_hv, kdp, rho_hv, cext_hh, cext_vv", "def density_2d(self, x, y, Rs, rho0, gamma_inner, gamma_outer, center_x=0, center_y=0):\n x_ = x - center_x\n y_ = y - center_y\n R = np.sqrt(x_ ** 2 + y_ ** 2)\n x = R / Rs\n Fx = self._f(x, gamma_inner, gamma_outer)\n return 2 * rho0 * Rs * Fx", "def nfw2D_smoothed(self, R, Rs, rho0, r200, pixscale):\n x = R/Rs\n d = pixscale/(2*Rs)\n a = np.empty_like(x)\n x_ = x[x > d]\n upper = x_+d\n lower = x_-d\n\n a[x > d] = 4*rho0*Rs**3*(self.g(upper)-self.g(lower))/(2*x_*Rs*pixscale)\n a[x < d] = 4*rho0*Rs**3*self.g(d)/((pixscale/2)**2)\n return a", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def calculate_curvature_radius(self):\n\n # meters per pixel in y dimension\n ym_per_pix = config[\"video\"][\"y_meters_per_pixel\"]\n frame_height = config[\"video\"][\"size\"][1]\n\n # y_eval is where we want to evaluate the fits for the line radius calcuation\n # for us it's at the bottom of the image for us, and because we know\n # the size of our video/images we can just hardcode it\n y_eval = frame_height * ym_per_pix\n fit = self.line_fit_m\n\n # https://stackoverflow.com/a/40021903\n if fit.size != 0:\n curve_rad = ((1 + (2 * fit[0] * y_eval + fit[1]) ** 2) ** 1.5) / np.absolute(2 * fit[0])\n else:\n curve_rad = None\n self.curvature_radius = curve_rad", "def get_SSD():\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)", "def calc_curvature(self, windows: List[Window]):\n x, y = zip(*[window.pos_xy() for window in windows])\n x = np.array(x)\n y = np.array(y)\n fit_cr = np.polyfit(y * camera.y_m_per_pix, x * camera.x_m_per_pix, 2)\n y_eval = np.max(y)\n return ((1 
+ (2 * fit_cr[0] * y_eval * camera.y_m_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n 2 * fit_cr[0])", "def get_drainage_data(self):\n im = self.result\n sizes = sp.unique(im)\n R = []\n Snwp = []\n Vp = sp.sum(im > 0)\n for r in sizes[1:]:\n R.append(r)\n Snwp.append(sp.sum(im >= r))\n Snwp = [s/Vp for s in Snwp]\n data = namedtuple('xy_data', ('radius', 'saturation'))\n return data(R, Snwp)", "def calc_length_distortion_on_ellipsoid(self, lon, lat):\n\n # get the subgrid\n sg, _, _ = self.lonlat2xy(lon, lat)\n\n lon0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('central_meridian')\n lat0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('latitude_of_origin')\n\n # get spherical distance and azimuth between projection centre and point of interest\n geod = Geodesic.WGS84\n gi = geod.Inverse(lat0, lon0, lat, lon)\n c1 = gi['s12']\n az1 = gi['azi1']\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n k = c1 / geod.a / np.sin(c1 / geod.a)\n\n return k", "def get_Curvature(self):\n #return str(np.mean([self.line_l.get_CurveRad(), self.line_r.get_CurveRad()]))\n y = np.linspace(0,719, 10)\n x = self.center_poly(y)\n fit_scaled = np.polyfit(y*self.line_l.y_pxm,x*self.line_l.x_pxm, deg=2)\n curverad = ((1 + (2 * fit_scaled[0] * 600 + fit_scaled[1]) ** 2) ** 1.5) / np.absolute(2 * fit_scaled[0])\n\n if len(self.curve_buffer) > 15:\n self.curve_buffer.pop(0)\n\n self.curve_buffer.append(curverad)\n _, self.curve_buffer = self.line_l.remove_outliers(self.curve_buffer,[None]*len(self.curve_buffer), m=3)\n buff_mean= np.mean(self.curve_buffer)\n #print(\"Buf Mean: \" +str(buff_mean))\n #outlier = np.abs(buff_mean - curverad) > np.std(self.curve_buffer)*2\n if curverad > 4000.0:\n buff_mean = \"Straight Lane\"\n else:\n buff_mean = str(int(buff_mean)) + \" m\"\n\n return buff_mean" ]
[ "0.5454819", "0.53788716", "0.5350352", "0.5328957", "0.5292687", "0.52506727", "0.5144197", "0.51307636", "0.5065308", "0.50575227", "0.50207204", "0.50146633", "0.49834746", "0.4977443", "0.4975468", "0.49359798", "0.49337485", "0.49244788", "0.49076614", "0.4901918", "0.4901232", "0.4893155", "0.4874633", "0.4869597", "0.4863315", "0.48629382", "0.48519906", "0.48444718", "0.48389986", "0.4833917" ]
0.55072665
0
Turns a numpy array into a string. Use str2np to convert back. Yes, it is absurd that we need to do this. We shouldn't. But from my Expert
def np2str(a: np.ndarray) -> str:
    return json.dumps(a.tolist())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def arraystr(a, max_line_width=None, precision=None, suppress_small=None):\n return np.array2string(a, max_line_width,\n precision, suppress_small,\n separator=', ', prefix=\"\", style=str)\\\n .replace('..., ', '..., ' if PY3 else 'Ellipsis, ')", "def to_str(array, encoding='utf8'):\n\n if not isinstance(array, np.ndarray):\n raise ValueError('input should be a NumPy array.')\n\n return np.char.decode(array, encoding)", "def array2anyscript(arr):\n def tostr(v):\n if np.isreal(v):\n return '{:.12g}'.format(v)\n elif isinstance(v, (string_types, np.str_)):\n return '\"{}\"'.format(v)\n\n def createsubarr(arr):\n outstr = \"\"\n if isinstance(arr, np.ndarray):\n if len(arr) == 1 and not isinstance(arr[0], np.ndarray):\n return '{'+tostr(arr[0]) + '},'\n outstr += '{'\n for row in arr:\n outstr += createsubarr(row)\n outstr = outstr.strip(',') + '},'\n return outstr\n else:\n return outstr + tostr(arr)+','\n if isinstance(arr, np.ndarray) and not arr.shape:\n return tostr(arr.tolist())\n elif isinstance(arr, np.ndarray) :\n return createsubarr(arr).strip(',')\n elif isinstance( arr, float):\n return tostr(arr)\n else:\n return str(arr)", "def numpy_to_plotlystring(numpy_array, precision=4):\n np.set_printoptions(threshold=np.nan) # enables full printing\n n_data = np.shape(numpy_array)[0]\n # prevent array2string to cut string by ofsestting maximal width\n max_line_width=n_data*(8) # Output Format [#.####, ...], thus 7 chars per entry, added 100 for safekeeping\n string_array = np.array2string(numpy_array, separator=',', max_line_width=n_data*(8), precision=precision, formatter={'float_kind':lambda x: \"%.4f\" % x})\n np.set_printoptions(linewidth=75, threshold=1000) # restores default\n return string_array", "def transfer_2d_array_to_str(array):\n str_list = []\n for r in array:\n str_list.append(\",\".join([str(e) for e in r]))\n return \" \".join(str_list)", "def image2str(arr: np.ndarray, char='X', *, columns=28) -> str:\n if isinstance(arr, np.ndarray):\n dims = len(arr.shape)\n else:\n dims, a = 0, arr\n while True:\n try:\n a = a[0]\n except TypeError:\n break\n dims += 1\n if dims == 1:\n arr = [arr[i:i+columns] for i in range(0, len(arr), columns)]\n # for i in arr: print(' '.join(f'{x:>3}' for x in i)) # to print nicely as numbers\n elif dims != 2:\n raise ValueError(\"Can not convert many-dimensional array to string\")\n return '\\n'.join(''.join(char if i else ' ' for i in row) for row in arr)", "def cast_numpy_to_txt(arr, output_file):\n shape = arr.shape\n arr = arr.reshape([shape[0] * shape[1], shape[2]])\n\n np.savetxt(fname=output_file, X=arr, delimiter=' ', fmt='%.18e', newline='\\n', )", "def array_2d_str(array, fmt='{:.2f}', sep=', ', row_sep='\\n', with_boundary=True):\n ret = row_sep.join([array_str(x, fmt=fmt, sep=sep, with_boundary=with_boundary) for x in array])\n if with_boundary:\n ret = '[' + ret + ']'\n return ret", "def str_array(arr, sides=(3, 3), delim=\", \", format=None, log=False, label_log=True):\n arr = np.asarray(arr)\n if log:\n arr = np.log10(arr)\n\n len_arr = arr.size\n beg, end = _str_array_get_beg_end(sides, len_arr)\n\n if format is None:\n format = _guess_str_format_from_range(arr)\n\n # Create the style specification\n form = \"{{{}}}\".format(format)\n\n arr_str = _str_array_1d(arr, beg, end, form, delim)\n if log and label_log:\n arr_str += \" (log values)\"\n\n return arr_str", "def array_str(array, fmt='{:.2f}', sep=', ', with_boundary=True):\n ret = sep.join([fmt.format(float(x)) for x in array])\n if with_boundary:\n ret = '[' + ret + 
']'\n return ret", "def convert_to_string_array(matrix):\n res = []\n for row in matrix:\n res.append(''.join(row))\n return '\\n'.join(res)", "def convert_char_array_to_string(char_array):\n str = \"\"\n try:\n return str.join(char_array)\n except:\n return \"Error occured: variable non string\"", "def _numpy_text(tensor):\n if dtype_util.is_numpy_compatible(tensor.dtype):\n value = np.array(tensor)\n if value.shape:\n text = repr(value)\n else:\n text = str(value)\n else:\n text = '<unprintable>'\n if '\\n' in text:\n text = '\\n' + text\n return text", "def h5_to_string(char_array):\n import numpy as np\n if type(char_array) in [bytes, np.bytes_]:\n return char_array.decode()\n if type(char_array) == str:\n return char_array\n raise TypeError(\"Char_array must be a string or byte array!\\n\"\n +\"Your type is: {}.\\n\".format(type(char_array)))", "def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()", "def json_serialize_numpy_array(array):\n return json.dumps(json_ready_numpy_array(array))", "def json_serialize_numpy_array(array):\n return json.dumps(json_ready_numpy_array(array))", "def int_array_to_str(result):\n result = [str(i) for i in result]\n return \"\".join(result)", "def normalize_array(var):\n if np.issubdtype(var.dtype, 'S1'):\n if var.dtype == str:\n # Python 2 on netCDF4 'string' variables needs this.\n # Python 3 returns false for np.issubdtype(var.dtype, 'S1')\n return var[:]\n\n def decoder(x):\n return str(x.decode('utf-8'))\n vfunc = np.vectorize(decoder)\n return vfunc(nc4.chartostring(var[:]))\n else:\n return var[:]", "def arr2str(arr, sep=\", \", fmt=\"{}\"):\n return sep.join([fmt.format(v) for v in arr])", "def float_array_string(arr: Iterable[float]) -> str:\n return \"[\" + \", \".join([\"{:.4f}\".format(el) for el in arr]) + \"]\"", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def array_to_string(array: list, separator: str) -> str:\n string = ''\n for value in array:\n if type(value) == str:\n value = value.replace(\"'\", \"''\")\n string += \"'\" + value + \"'\"\n elif value is None:\n string += 'null'\n else:\n string += str(value)\n string += separator\n\n string = string[:-len(separator)]\n\n return string", "def str_to_numpy(string_array):\n if pd.isnull(string_array):\n return(np.NaN)\n else:\n return np.array(ast.literal_eval(string_array))", "def array_to_concatenated_string(array):\r\n return \",\".join(str(x) for x in array)", "def _unicode(arr):\n try:\n return unicode(arr)\n except UnicodeEncodeError:\n dt = arr.dtype.newbyteorder('S')\n return unicode(arr.view(dt))", "def data_2_base64(data: np.ndarray) -> str:\n bytes_io = io.BytesIO()\n np.save(bytes_io, data, allow_pickle=False)\n return base64.b64encode(zlib.compress(bytes_io.getvalue())).decode('utf-8')", "def obs_to_string(observations):\n str_obs = []\n for obs in observations:\n str_obs.append(obs.reshape(-1).tostring())\n return str_obs", "def array_to_concatenated_string(array):\n return \",\".join(str(x) for x in array)", "def matrix2str(A):\n s = \"\"\n for x in numpy.nditer(A, order='F'):\n s = s + str(x) + \",\"\n\n return s" ]
[ "0.7668864", "0.7660055", "0.74028087", "0.73397017", "0.7132134", "0.6980833", "0.6820138", "0.68081397", "0.67062193", "0.6697705", "0.6608742", "0.65923023", "0.65681803", "0.65397334", "0.6435107", "0.6393014", "0.6393014", "0.6337105", "0.6320334", "0.63053894", "0.62946767", "0.6283385", "0.6262201", "0.6236794", "0.6234114", "0.6191239", "0.6190846", "0.61761916", "0.6170926", "0.6169537" ]
0.778396
0
Turns a string into a numpy array. Reverse of np2str.
def str2np(s: str) -> np.ndarray:
    return np.array(json.loads(s))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def str_to_numpy(string_array):\n if pd.isnull(string_array):\n return(np.NaN)\n else:\n return np.array(ast.literal_eval(string_array))", "def string_to_array(arg):\n\n res = arg.replace('[', '').replace(']', '').replace(',', '')\n return np.array(res.split(' '), dtype=np.int8)", "def concatenated_string_to_array(string):\r\n return np.array([int(x) for x in string.split(\",\")])", "def concatenated_string_to_array(string):\n return np.array([int(x) for x in string.split(\",\")])", "def convert_strings_to_array(strings):\n row_strings = strings.split(\"\\n\")\n new_array = np.array([[float(i) for i in row_string.split(\",\")] for row_string in row_strings])\n shape = new_array.shape\n if shape[1]==2:\n return new_array\n elif shape[0]==2:\n return new_array.T\n else:\n print \"Currently only accepting arrays of shape (2,x) or (x,2)\"\n return None", "def parse_string(s):\n # type: (str) -> Union[str, np.ndarray, float]\n v = re.sub(r'[\\[\\]]', '', s)\n\n if ',' in v:\n v = v.split(',')\n elif ';' in v:\n v = v.split(';')\n\n try:\n v = np.atleast_1d(np.array(v, dtype=float))\n if v.size == 1:\n v = v[0]\n return v\n except ValueError:\n return s", "def dna_string_to_array(str):\n\tlookup = { \n\t'A' : np.array([[1.0], [-1.0], [-1.0], [-1.0]]), \n\t'C' : np.array([[-1.0], [1.0], [-1.0], [-1.0]]),\n\t'T' : np.array([[-1.0], [-1.0], [1.0], [-1.0]]),\n\t'G' : np.array([[-1.0], [-1.0], [-1.0], [1.0]]) }\n\treturn np.concatenate([lookup[ch] for ch in str.upper()])", "def fromstring(string, **kwargs):\n\n return call_origin(numpy.fromstring, string, **kwargs)", "def __build_array(string):\n ar = []\n tmp = string.split('.')\n\n for item in tmp:\n ar.append( item.strip().strip('[').strip(']').strip() )\n\n return ar", "def getarray(str_array):\n if ':' in str_array:\n s=str_array.split(':')\n if 'm' in s[0]:\n s2=[cu.convlen(x) for x in s]\n else: \n s2=[float(x) for x in s]\n ns=round((s2[2]-s2[0])/s2[1])+1\n s3=np.linspace(s2[0],s2[2],ns)\n else:\n s3=cu.convlen(str_array)\n \n return s3", "def base64_decode_array(inStr, dtype):\n return np.frombuffer(base64.decodestring(inStr), dtype=dtype)", "def str_to_img_ndarrary(s):\n img = str_to_pil_img(s)\n img_array = np.array(img)\n return skimage.util.img_as_float(img_array)", "def ascii_to_numpy(ascii_diagram, as_bytes=True):\n ascii_diagram = [list(i) for i in ascii_diagram]\n ascii_diagram = np.array(ascii_diagram)\n v_to_bytes = np.vectorize(to_bytes)\n return v_to_bytes(ascii_diagram) if as_bytes else ascii_diagram", "def string_to_array(s):\n\n if isinstance(s, str):\n out = s.split(\"|\")\n elif math.isnan(s):\n out = []\n else:\n raise ValueError(\"Value must be either string of nan\")\n return out", "def base64_2_data(s: str) -> np.ndarray:\n saved_bytes = io.BytesIO(zlib.decompress(base64.b64decode(s)))\n return np.load(saved_bytes)", "def line_to_data(line, np_array=True, dtype=int):\n if np_array:\n return np.fromstring(line, dtype=dtype, sep=\" \")\n else:\n return [dtype(x) for x in line.split(\" \")]", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n 
return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def parse_string(str_arr):\n def to_arr(str_arr):\n \"\"\" Switch to list. \"\"\"\n row = str_arr.replace(']', '').\\\n replace('[', '').\\\n replace('{', '').\\\n replace('}', '').\\\n replace('\\n', '').split()\n\n if '+-' in row:\n row = kludge_gvars(row)\n row = [gv.gvar(str(elt)) for elt in row]\n return np.array(row)\n\n def kludge_gvars(mangled):\n \"\"\"\n Occasionally, gvars get rendered to strings as, e.g.,\n -4e-06 +- 1 instead of -0.000006(1.0). This makes a\n complete mess of trying to parse the a list of gvar\n which has been turned into a string, e.g.,\n '[1(2) 1 +- 2 0.003(2)]', since the usual str.split()\n separates '1 +- 2' --> ['1','+-','2']. This function is\n a kludge which works around this difficulty.\n \"\"\"\n # Loop in reverse looking for '+-', but don't run off the end\n for idx in range(len(mangled) - 1)[::-1]:\n if mangled[idx + 1] == '+-':\n reunited = ' '.join(mangled[idx:idx + 3])\n # Throw away the used elements...\n for _ in range(3):\n mangled.pop(idx)\n # Repair the list with reunited gvar string\n mangled.insert(idx, reunited)\n return mangled\n\n return to_arr(str_arr)", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 8\n (self.i,) = _struct_d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def parse_key_as_numpy_array(obj, string, key, value):\n match_string = \"r\"+'\"'+string+'\"'\n pieces = key.split(string)\n if len(pieces) > 1:\n attr = pieces.pop(0)\n if not hasattr(obj,attr):\n raise Exception(\"Cannot add a numpy array, only edit.\")\n current_attribute = getattr(obj,attr)\n if not isinstance(current_attribute, numpy.ndarray):\n raise Exception(\"The atribute \" + key + \" of the object \" + \n obj.__name__ + \" must be a numpy array a priori.\" )\n full_dim = []\n for i in range(len(pieces)):\n dim = pieces[i]\n dim_pieces = dim.split(\"-\")\n if dim == \"-\":\n dim_len = current_attribute.shape[i]\n dim = slice(0,dim_len)\n elif len(dim_pieces) == 2:\n start, stop = dim.split(\"-\")\n dim = slice(int(start),int(stop))\n full_dim.append(dim)\n current_attribute[full_dim] = value\n return full_dim\n return key", "def from_ascii(s):\n\treturn numpy.array(\n\t\t[\n\t\t\tord(c)\n\t\t\tfor c in s\n\t\t],\n\t\tdtype=numpy.uint8\n\t)", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 57\n (_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ,) = _get_struct_b7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def to_numpy(array):\n if not CUPY_LOADED:\n return array\n else:\n return xp.asnumpy(array)", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 1\n (self.result,) = _struct_B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def get_ndarray(name, arr_shape, arr_type):\n arr_str = get_from_db(key=name)\n return np.fromstring(arr_str, dtype=np.dtype(arr_type)) \\\n .reshape(arr_shape)", "def cast_txt_to_numpy(iuput_file):\n # Load the txt file\n with open(iuput_file, 'r') as tmpfile:\n lines = tmpfile.readlines()\n\n # Restore the numpy array\n holder = []\n for line in lines:\n holder.append([float(x) for x in line.split(' ')])\n\n # 
Construct the numpy array\n holder = np.array(holder)\n\n return holder", "def s2a(s):\n return np.array(list(s))", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 2580\n self.Rscanpose = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=645)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (self.numberOfTSPTurtles,) = _get_struct_i().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle,) = _get_struct_ihih3i3d2i2d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill" ]
[ "0.8174913", "0.78813064", "0.750545", "0.742618", "0.7182514", "0.70567936", "0.6892877", "0.6872287", "0.67668694", "0.67524487", "0.67366683", "0.6626965", "0.6586725", "0.6580806", "0.64523757", "0.6432584", "0.6423253", "0.63838595", "0.6343251", "0.63229716", "0.63114595", "0.6304376", "0.62693256", "0.626172", "0.6249253", "0.6236253", "0.62140894", "0.6195573", "0.61927503", "0.6177052" ]
0.8019013
1
Syncopy custom ExceptionHandler. Prints formatted and colored messages and stack traces, and starts debugging if `%pdb` is enabled in Jupyter/iPython.
def SPYExceptionHandler(*excargs, **exckwargs):
    # Depending on the number of input arguments, we're either in Jupyter/iPython
    # or "regular" Python - this matters for coloring error messages
    if len(excargs) == 3:
        isipy = False
        etype, evalue, etb = excargs
    else:
        etype, evalue, etb = sys.exc_info()
        try:
            # careful: if iPython is used to launch a script, ``get_ipython`` is not defined
            ipy = get_ipython()
            isipy = True
            cols = ipy.InteractiveTB.Colors
            cols.filename = cols.filenameEm
            cols.bold = ansiBold
            sys.last_traceback = etb  # smartify ``sys``
        except NameError:
            isipy = False

    # Pass ``KeyboardInterrupt`` on to regular excepthook so that CTRL + C
    # can still be used to abort program execution (only relevant in "regular"
    # Python prompts)
    if issubclass(etype, KeyboardInterrupt) and not isipy:
        sys.__excepthook__(etype, evalue, etb)
        return

    # Starty by putting together first line of error message
    emsg = "{}\nSyNCoPy encountered an error in{} \n\n".format(cols.topline if isipy else "",
                                                               cols.Normal if isipy else "")

    # If we're dealing with a `SyntaxError`, show it and getta outta here
    if issubclass(etype, SyntaxError):
        # Just format exception, don't mess around w/ traceback
        exc_fmt = traceback.format_exception_only(etype, evalue)
        for eline in exc_fmt:
            if "File" in eline:
                eline = eline.split("File ")[1]
                fname, lineno = eline.split(", line ")
                emsg += "{}{}{}".format(cols.filename if isipy else "",
                                        fname,
                                        cols.Normal if isipy else "")
                emsg += ", line {}{}{}".format(cols.lineno if isipy else "",
                                               lineno,
                                               cols.Normal if isipy else "")
            elif "SyntaxError" in eline:
                smsg = eline.split("SyntaxError: ")[1]
                emsg += "{}{}SyntaxError{}: {}{}{}".format(cols.excName if isipy else "",
                                                           cols.bold if isipy else "",
                                                           cols.Normal if isipy else "",
                                                           cols.bold if isipy else "",
                                                           smsg,
                                                           cols.Normal if isipy else "")
            else:
                emsg += "{}{}{}".format(cols.line if isipy else "",
                                        eline,
                                        cols.Normal if isipy else "")

        # Show generated message and leave (or kick-off debugging in Jupyer/iPython if %pdb is on)
        logger = get_parallel_logger()
        logger.critical(emsg)
        if isipy:
            if ipy.call_pdb:
                ipy.InteractiveTB.debugger()
        return

    # Build an ordered(!) dictionary that encodes separators for traceback components
    sep = OrderedDict({"filename": ", line ", "lineno": " in ", "name": "\n\t", "line": "\n"})

    # Find "root" of traceback tree (and remove outer-most frames)
    keepgoing = True
    while keepgoing:
        frame = traceback.extract_tb(etb)[0]
        etb = etb.tb_next
        if frame.filename.find("site-packages") < 0 or \
           (frame.filename.find("site-packages") >= 0 and \
            frame.filename.find("syncopy") >= 0):
            tb_entry = ""
            for attr in sep.keys():
                tb_entry += "{}{}{}{}".format(getattr(cols, attr) if isipy else "",
                                              getattr(frame, attr),
                                              cols.Normal if isipy else "",
                                              sep.get(attr))
            emsg += tb_entry
            keepgoing = False

    # Format the exception-part of the traceback - the resulting list usually
    # contains only a single string - if we find more just use everything
    exc_fmt = traceback.format_exception_only(etype, evalue)
    if len(exc_fmt) == 1:
        exc_msg = exc_fmt[0]
        idx = exc_msg.rfind(etype.__name__)
        if idx >= 0:
            exc_msg = exc_msg[idx + len(etype.__name__):]
        exc_name = "{}{}{}{}".format(cols.excName if isipy else "",
                                     cols.bold if isipy else "",
                                     etype.__name__,
                                     cols.Normal if isipy else "")
    else:
        exc_msg = "".join(exc_fmt)
        exc_name = ""

    # Now go through traceback and put together a list of strings for printing
    if __tbcount__ and etb is not None:
        emsg += "\n" + "-"*80 + "\nAbbreviated traceback:\n\n"
        tb_count = 0
        tb_list = []
        for frame in traceback.extract_tb(etb):
            if frame.filename.find("site-packages") < 0 or \
               (frame.filename.find("site-packages") >= 0 and \
                frame.filename.find("syncopy") >= 0):
                tb_entry = ""
                for attr in sep.keys():
                    tb_entry += "{}{}{}{}".format("",  # placeholder for color if wanted
                                                  getattr(frame, attr),
                                                  "",  # placeholder for color if wanted
                                                  sep.get(attr))
                tb_list.append(tb_entry)
                tb_count += 1
                if tb_count == __tbcount__:
                    break
        emsg += "".join(tb_list)

    # Finally, another info message
    if etb is not None:
        emsg += "\nUse `import traceback; import sys; traceback.print_tb(sys.last_traceback)` " + \
                "for full error traceback.\n"

    # Glue actual Exception name + message to output string
    emsg += "{}{}{}{}{}".format("\n" if isipy else "",
                                exc_name,
                                cols.bold if isipy else "",
                                exc_msg,
                                cols.Normal if isipy else "",)

    # Show generated message and get outta here
    logger = get_parallel_logger()
    logger.critical(emsg)

    # Kick-start debugging in case %pdb is enabled in Jupyter/iPython
    if isipy:
        if ipy.call_pdb:
            ipy.InteractiveTB.debugger()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def baseExceptionHandler(*args):\n\n\theader, frames, trcback = formatReport(*extractException(*args))\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), header)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), frames)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tsys.stderr.write(\"\\n\".join(trcback))\n\n\treturn True", "def hook_exceptions():\n\n if hasattr(sys.stdout, \"fileno\"): # when testing, sys.stdout is StringIO\n # reopen stdout in non buffered mode\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)\n # set the hook\n sys.excepthook = traceback_formatter", "def debug_error_handler(environ, start_response):\n exc_info = environ.get('com.xythian.shotweb.exception')\n write = start_response('500 Internal server error',\n [('Content-type', 'text/html')],\n exc_info)\n et, v, tb = exc_info\n import traceback\n traceback.print_exception(et, v, tb, file=sys.stderr)\n return cgitb.html(exc_info)", "def idb_excepthook(type, value, tb):\n if hasattr(sys, \"ps1\") or not sys.stderr.isatty():\n sys.__excepthook__(type, value, tb)\n else:\n traceback.print_exception(type, value, tb)\n print\n pdb.pm()", "def _catch_exceptions(self, exctype, value, tb):\n\n # Now we log it.\n self.error(\"Uncaught exception\", exc_info=(exctype, value, tb))\n\n # First, we print to stdout with some colouring.\n print_exception_formatted(exctype, value, tb)", "def exceptionTraceback(self, alwaysPrint = False):\n self.logPre( traceback.format_exc(), alwaysPrint )", "def my_err_handler(traceback, exec_info):\n print \"Custom function invoked\"\n print \"Formatted exception\"\n print traceback.format_exc()\n print \"System exec info\"\n print exec_info\n exp_type, exp_value, exp_traceback = exec_info\n print \"String formatted exception\"\n print traceback.format_exception(exp_type, exp_value, exp_traceback)\n print \"End of custom function\"", "def on_error(self, exception):\n traceback.print_exc()", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def traceback_hook(type, value, traceback):\n logger.error(\"Uncaught Error:\", exc_info=(type, value, traceback))", "def traceback(self):", "def handle_error():\n print \"An error occurred. 
Trace:\\n\"\n traceback.print_exc()", "def handle_exception(self, exception, debug_mode): # pylint: disable-msg=C0103\n self.error(500)\n logger = logging\n if self.fsm:\n logger = self.fsm.logger\n logger.exception(\"FSMHandler caught Exception\")\n if debug_mode:\n import traceback, sys, cgi\n\n lines = ''.join(traceback.format_exception(*sys.exc_info()))\n self.response.clear()\n self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))", "def exception(self):\n exc_type, exc_value, exc_tb = sys.exc_info()\n cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],\n log_message=traceback.format_exc())", "def showtraceback(self,exc_tuple = None):\n\n # Though this won't be called by syntax errors in the input line,\n # there may be SyntaxError cases whith imported code.\n if exc_tuple is None:\n type, value, tb = sys.exc_info()\n else:\n type, value, tb = exc_tuple\n if type is SyntaxError:\n self.showsyntaxerror()\n else:\n sys.last_type = type\n sys.last_value = value\n sys.last_traceback = tb\n self.InteractiveTB()\n if self.InteractiveTB.call_pdb and self.has_readline:\n # pdb mucks up readline, fix it back\n self.readline.set_completer(self.Completer.complete)", "def exception_handler(self, exception):\n pass", "def handle_exception(exc_type, exc_value, exc_traceback):\n exc_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)\n exc_msg.insert(0, 'Uncaught exception on processor {}\\n'.format(mpiops.chunk_index))\n exc_msg = \"\".join(exc_msg)\n print(exc_msg, file=sys.stderr)", "def _exception_handler(self, loop: asyncio.AbstractEventLoop, context: dict):\n try:\n if 'exception' in context:\n exc = context['exception']\n print('Uncaught exception %s' % exc, file=sys.stderr)\n print(traceback.format_tb(exc.__traceback__), file=sys.stderr)\n if not self._ignore_exceptions:\n self.shutdown(1)\n else:\n print('Uncaught exception with unknown type ignored', file=sys.stderr)\n except:\n print('Something went seriously wrong. 
Error in error handler.', file=sys.stderr)\n print(context, file=sys.stderr)\n print(traceback.format_exc(), file=sys.stderr)", "def debug_on(*skip_exceptions):\n if not skip_exceptions:\n skip_exceptions = ()\n\n def decorator(f):\n global DEBUG\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not DEBUG:\n return f(*args, **kwargs)\n try:\n return f(*args, **kwargs)\n except Exception as e:\n for skip in skip_exceptions:\n if isinstance(e, skip):\n raise e\n print '\\n'\n for line in traceback.format_tb(sys.exc_info()[2]):\n print line\n print str(e.__class__.__name__) + ': ' + str(e) + '\\n'\n pdb.post_mortem(sys.exc_info()[2])\n raise e\n return wrapper\n\n return decorator", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message =\"Signal received : entering python shell.Traceback:\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def handle_exception(self, exception):\n self._error_flag = True\n if exception.__class__ is blotish.BlotishError:\n blot_common.print_blot_error(exception)\n else:\n traceback.print_exc(file=sys.stdout)", "def graphical_exception_handler(self, exc_type, exc_value, exc_tb):\n bugdialog.ShowEI(exc_type, exc_value, exc_tb)\n if compat.PYTHON2: sys.exc_clear()", "def color_excepthook(pdb=0, mode=2, force=True):\n\n modus = ['Plain', 'Context', 'Verbose'][mode] # select the mode\n\n if force or not sys.excepthook == sys_orig_excepthook:\n sys.excepthook = ultratb.FormattedTB(mode=modus,\n color_scheme='Linux', call_pdb=pdb)", "def quiet_hook(kind, message, traceback):\n if QuietException in kind.__bases__:\n # Only print Error Type and Message\n print('{0}: {1}'.format(kind.__name__, message))\n else:\n # Print Error Type, Message and Traceback\n sys.__excepthook__(kind, message, traceback)", "def handler(self, exctype, value, traceback):\n\n message = 'error occurred({}) in {}:{}'.format(value, traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)\n self.errorSignal.emit()\n self.logger.critical('Unhandled exception: {}'.format(message))", "def handle_exception(*exc_info):\n import logging\n import traceback\n\n logging.critical(\"\".join(traceback.format_exception(*exc_info)))", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def test_handle_uncaught_app_exception_with_rich(self):\n exc = Exception(\"boom!\")\n with testutil.patch_config_options({\"logger.enableRich\": True}):\n with io.StringIO() as buf:\n # Capture stdout logs (rich logs to stdout)\n with contextlib.redirect_stdout(buf):\n handle_uncaught_app_exception(exc)\n # Capture the stdout output\n captured_output = buf.getvalue()\n\n assert \"Exception:\" in captured_output\n assert \"boom!\" in captured_output\n # Uncaught app exception is only used by the non-rich exception logging\n assert \"Uncaught app exception\" not in captured_output\n with testutil.patch_config_options({\"logger.enableRich\": False}):\n with io.StringIO() as buf:\n # Capture stdout logs\n with contextlib.redirect_stdout(buf):\n handle_uncaught_app_exception(exc)\n # Capture the stdout output\n captured_output = buf.getvalue()\n\n # With rich deactivated, the exception is not logged to stdout\n assert \"Exception:\" not in captured_output\n assert \"boom!\" not in captured_output", "def ip_extra_syshook(fnc, pdb=0, filename=None):\n\n assert isinstance(fnc, collections.Callable)\n from 
IPython.core import ultratb\n import time\n\n if not filename == None:\n assert isinstance(filename, str)\n pdb = 0\n\n ip_excepthook = ultratb.FormattedTB(mode='Verbose',\n color_scheme='Linux', call_pdb=pdb)\n\n fileTraceback = ultratb.FormattedTB(mode='Verbose',\n color_scheme='NoColor', call_pdb=0)\n\n # define the new excepthook\n def theexecpthook (type, value, traceback):\n fnc()\n ip_excepthook(type, value, traceback)\n # write this to a File without Colors\n if not filename == None:\n outFile = open(filename, \"a\")\n outFile.write(\"--\" + time.ctime()+\" --\\n\")\n outFile.write(fileTraceback.text(type, value, traceback))\n outFile.write(\"\\n-- --\\n\")\n outFile.close()\n\n # assign it\n sys.excepthook = theexecpthook", "def handle_exception(self, source: t.Optional[str] = None) -> \"te.NoReturn\":\n from .debug import rewrite_traceback_stack\n\n raise rewrite_traceback_stack(source=source)" ]
[ "0.63668907", "0.62963057", "0.62103194", "0.6100354", "0.60183656", "0.5926729", "0.5918545", "0.5916895", "0.5910487", "0.5765461", "0.57642436", "0.57295614", "0.5726145", "0.56801313", "0.5637029", "0.56245685", "0.5609924", "0.55958366", "0.55697525", "0.5569087", "0.55551904", "0.5498179", "0.5491105", "0.5459315", "0.5455913", "0.54450995", "0.5443837", "0.54393333", "0.5435701", "0.5435475" ]
0.64448094
0
Log a message in parallel code run via slurm. This uses the parallel logger and one file per machine. Returns
def SPYParallelLog(msg, loglevel="INFO", caller=None):
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        # Invalid string was set.
        raise SPYValueError(legal=f"one of: {loglevels}", varname="loglevel", actual=loglevel)

    if caller is None:
        caller = sys._getframe().f_back.f_code.co_name

    PrintMsg = "{caller:s} {msg:s}"
    logger = get_parallel_logger()
    logfunc = getattr(logger, loglevel.lower())
    logfunc(PrintMsg.format(caller=_get_caller(caller), msg=msg))
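get_parallel_logger, SPYValueError and _get_caller above are SyNCoPy internals that this row does not include. As a hedged, standard-library-only approximation of the "one log file per machine" behaviour, a per-host file logger could be built like this (file name and format are assumptions):

import logging
import socket

def get_host_logger(logdir="."):
    # One named logger (and one log file) per machine, keyed by hostname
    logger = logging.getLogger("parallel." + socket.gethostname())
    if not logger.handlers:  # avoid adding duplicate handlers on repeated calls
        handler = logging.FileHandler("{}/parallel_{}.log".format(logdir, socket.gethostname()))
        handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger

get_host_logger().info("caller_name: message text")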
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_parallel(pid, call_method_id, run_id='gwas', kinship_method='ibd'):\n job_id = '%s_%s_%d_%d' % (run_id, kinship_method, call_method_id, pid)\n file_prefix = env.env['results_dir'] + job_id\n\n #Cluster specific parameters \n shstr = '#!/bin/bash\\n'\n shstr += '#$ -S /bin/bash\\n'\n shstr += '#$ -N %s\\n' % job_id\n #shstr += '#$ -o %s_job_$JOB_ID.out\\n' % file_prefix\n #shstr += '#$ -e %s_job_$JOB_ID.err\\n' % file_prefix\n shstr += '#$ -o %s_job.out\\n' % file_prefix\n shstr += '#$ -e %s_job.err\\n' % file_prefix\n shstr += 'source /etc/modules-env.sh\\n'\n shstr += 'module load scipy/GotoBLAS2/0.9.0\\n'\n shstr += 'module load matplotlib/1.0.0\\n'\n shstr += 'module load mysqldb/1.2.3\\n'\n\tshstr += 'module load h5py/2.0.0\\n'\n shstr += 'export GOTO_NUM_THREADS=1\\n'\n\n\n shstr += \"python %sfullseq_gwas_project.py %s %s %d %d\" % \\\n (env.env['script_dir'], run_id, kinship_method, call_method_id, pid)\n\n #shstr += \"> \" + file_prefix + \"_job.out) >& \" + file_prefix + \"_job.err\\n\"\n print '\\n', shstr, '\\n'\n script_file_name = run_id + \".sh\"\n f = open(script_file_name, 'w')\n f.write(shstr)\n f.close()\n\n #Execute qsub script\n os.system(\"qsub \" + script_file_name)", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def run_parallel(heritability, x_start_i, x_stop_i, cluster='usc'):\n\trun_id = 'corr_trait_sim'\n\tjob_id = ' % s_ % d_ % d' % (run_id, x_start_i, x_stop_i)\n\tfile_prefix = env.env['results_dir'] + run_id + '_' + str(x_start_i) + '_' + str(x_stop_i)\n\n\t#Cluster specific parameters\t\n\tif cluster == 'gmi': #GMI cluster.\n\t\tshstr = '#!/bin/sh\\n'\n\t\tshstr += '#$ -N %s\\n' % job_id\n\t\tshstr += \"#$ -q q.norm@blade*\\n\"\n\t\tshstr += '#$ -o %s.log\\n' % job_id\n\t\t#shstr += '#$ -cwd /home/GMI/$HOME\\n'\n\t\t#shstr += '#$ -M [email protected]\\n\\n'\n\n\telif cluster == 'usc': #USC cluster.\n\t\tshstr = \"#!/bin/csh\\n\"\n\t\tshstr += \"#PBS -l walltime=%s \\n\" % '72:00:00'\n\t\tshstr += \"#PBS -l mem=%s \\n\" % '1950mb'\n\t\tshstr += \"#PBS -q cmb\\n\"\n\t\tshstr += \"#PBS -N p%s \\n\" % job_id\n\n\tshstr += \"(python %scorr_trait_sim.py %s %d %d \" % (env.env['script_dir'], heritability, x_start_i, x_stop_i)\n\n\tshstr += \"> \" + file_prefix + \"_job.out) >& \" + file_prefix + \"_job.err\\n\"\n\tprint '\\n', shstr, '\\n'\n\tscript_file_name = run_id + \".sh\"\n\tf = open(script_file_name, 'w')\n\tf.write(shstr)\n\tf.close()\n\n\t#Execute qsub script\n\tos.system(\"qsub \" + script_file_name)", "def log(msg, level=\"IMPORTANT\", par=False, caller=None):\n if par:\n SPYParallelLog(msg, loglevel=level, caller=caller)\n else:\n SPYLog(msg, loglevel=level, caller=caller)", "def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n # sbatch command. 
Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n prefix = ''\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index += 1\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n\r\n # Create lines for SLURM input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix + input_file_parameters.job_name)\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --array={0}-{1}'.format(1, len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SLURM_ARRAY_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n 
resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths", "def write_slurm_runjob(name, ntasks, pmem, walltime, binary):\n\n nnodes = int(np.ceil(float(ntasks) / 32.0))\n\n runjob = open('runjob', 'w')\n runjob.write('#!/bin/bash\\n')\n runjob.write('#SBATCH --job-name={}\\n'.format(name))\n runjob.write('#SBATCH -o out_%j.log\\n')\n runjob.write('#SBATCH -e err_%j.log\\n')\n runjob.write('#SBATCH --qos=hennig-b\\n')\n runjob.write('#SBATCH --nodes={}\\n'.format(nnodes))\n runjob.write('#SBATCH --ntasks={}\\n'.format(ntasks))\n runjob.write('#SBATCH --mem-per-cpu={}\\n'.format(pmem))\n runjob.write('#SBATCH -t {}\\n\\n'.format(walltime))\n runjob.write('cd $SLURM_SUBMIT_DIR\\n\\n')\n runjob.write('module load intel/2016.0.109\\n')\n runjob.write('module load openmpi/1.10.1\\n')\n runjob.write('module load vasp/5.4.1\\n\\n')\n runjob.write('mpirun {} > job.log\\n\\n'.format(binary))\n runjob.write('echo \\'Done.\\'\\n')\n runjob.close()", "def log_cpu(level: int = INFO, msg: str = \"\", *args, **kwargs) -> None:\n if msg:\n DefaultLogger.log(level, \"%s: %s\", msg, microgp4_process_time())\n else:\n DefaultLogger.log(level, \"%s\", msg, microgp4_process_time())", "def create_slurm_file(\n slurm_filepath: Path, batch_size: int, num_batches: int, time_limit: int\n):\n slurm_string = f\"\"\"#!/usr/bin/bash\n#SBATCH --job-name=pctsp\n#SBATCH --partition=cpu-batch\n#SBATCH --ntasks=10\n#SBATCH --cpus-per-task=1\n#SBATCH --mem-per-cpu=4000\n#SBATCH --time={time_limit}:00:00\n#SBATCH --array=0-{num_batches-1}\n\n## Loop over each batch ##\nstart=$(($SLURM_ARRAY_TASK_ID * {batch_size}))\nsrun --ntasks=1 python scripts/batch_model.py $start {batch_size} \\\n\"\"\"\n slurm_filepath.write_text(slurm_string)", "def writeLog(pid):\n\tglobal processes,logfile,strikes,sleep\n\tproc = processes[pid]\n\tlogfile.write('[%s] %d %s %f%%cpu %f%%mem (over %d s): %s\\n'%(time.strftime('%b %d %H:%M:%S'),pid,proc.user,proc.cpu,proc.mem,proc.count*sleep,proc.command))", "def _run_step(self, step, new_log_file):\n logger = self.logger\n config = self.config\n cwd = os.getcwd()\n available_cores, _ = get_available_cores_and_nodes(config)\n step.cores = min(step.cores, available_cores)\n if step.min_cores is not None:\n if step.cores < step.min_cores:\n raise ValueError(\n 'Available cores for {} is below the minimum of {}'\n ''.format(step.cores, step.min_cores))\n\n missing_files = list()\n for input_file in step.inputs:\n if not os.path.exists(input_file):\n missing_files.append(input_file)\n\n if len(missing_files) > 0:\n raise OSError(\n 'input file(s) missing in step {} of {}/{}/{}: {}'.format(\n step.name, step.mpas_core.name, step.test_group.name,\n step.test_case.subdir, missing_files))\n\n test_name = step.path.replace('/', '_')\n if new_log_file:\n log_filename = '{}/{}.log'.format(cwd, step.name)\n step.log_filename = log_filename\n step_logger = None\n else:\n step_logger = logger\n log_filename = None\n with LoggingContext(name=test_name, 
logger=step_logger,\n log_filename=log_filename) as step_logger:\n step.logger = step_logger\n os.chdir(step.work_dir)\n step.run()\n\n missing_files = list()\n for output_file in step.outputs:\n if not os.path.exists(output_file):\n missing_files.append(output_file)\n\n if len(missing_files) > 0:\n raise OSError(\n 'output file(s) missing in step {} of {}/{}/{}: {}'.format(\n step.name, step.mpas_core.name, step.test_group.name,\n step.test_case.subdir, missing_files))", "def slurm(ctx, alloc, nodes, memory, walltime, feature, conda_env, module,\n stdout_path, verbose):\n\n name = ctx.obj['NAME']\n tech = ctx.obj['TECH']\n points = ctx.obj['POINTS']\n sam_files = ctx.obj['SAM_FILES']\n res_file = ctx.obj['RES_FILE']\n sites_per_worker = ctx.obj['SITES_PER_WORKER']\n dirout, fout = os.path.split(ctx.obj['OUT_FPATH'])\n logdir = ctx.obj['LOGDIR']\n output_request = ctx.obj['OUTPUT_REQUEST']\n site_data = ctx.obj['SITE_DATA']\n max_workers = ctx.obj['MAX_WORKERS']\n mem_util_lim = ctx.obj['MEM_UTIL_LIM']\n timeout = ctx.obj['TIMEOUT']\n curtailment = ctx.obj['CURTAILMENT']\n gid_map = ctx.obj['GID_MAP']\n verbose = any([verbose, ctx.obj['VERBOSE']])\n\n slurm_manager = ctx.obj.get('SLURM_MANAGER', None)\n if slurm_manager is None:\n slurm_manager = SLURM()\n ctx.obj['SLURM_MANAGER'] = slurm_manager\n\n pc = get_node_pc(points, sam_files, tech, res_file, nodes)\n\n for i, split in enumerate(pc):\n node_name, fout_node = get_node_name_fout(name, fout, i, pc,\n hpc='slurm')\n\n node_fpath = os.path.join(dirout, fout_node)\n cmd = get_node_cmd(node_name, tech, sam_files, res_file, node_fpath,\n points=points,\n points_range=split.split_range,\n sites_per_worker=sites_per_worker,\n max_workers=max_workers,\n logdir=logdir,\n output_request=output_request,\n site_data=site_data,\n mem_util_lim=mem_util_lim,\n timeout=timeout,\n curtailment=curtailment,\n gid_map=gid_map,\n verbose=verbose)\n\n status = Status.retrieve_job_status(dirout, 'generation', node_name,\n hardware='eagle',\n subprocess_manager=slurm_manager)\n\n if status == 'successful':\n msg = ('Job \"{}\" is successful in status json found in \"{}\", '\n 'not re-running.'\n .format(node_name, dirout))\n elif 'fail' not in str(status).lower() and status is not None:\n msg = ('Job \"{}\" was found with status \"{}\", not resubmitting'\n .format(node_name, status))\n else:\n logger.info('Running reV generation on SLURM with node name \"{}\" '\n 'for {} (points range: {}).'\n .format(node_name, pc, split.split_range))\n # create and submit the SLURM job\n out = slurm_manager.sbatch(cmd,\n alloc=alloc,\n memory=memory,\n walltime=walltime,\n feature=feature,\n name=node_name,\n stdout_path=stdout_path,\n conda_env=conda_env,\n module=module)[0]\n if out:\n msg = ('Kicked off reV generation job \"{}\" (SLURM jobid #{}).'\n .format(node_name, out))\n # add job to reV status file.\n Status.add_job(\n dirout, 'generation', node_name, replace=True,\n job_attrs={'job_id': out, 'hardware': 'eagle',\n 'fout': fout_node, 'dirout': dirout})\n\n click.echo(msg)\n logger.info(msg)", "def magic_runlog(self, parameter_s =''):\n\n for f in parameter_s.split():\n self.safe_execfile(f,self.user_ns,self.user_ns,islog=1)", "def system_parallel(cmdL, nproc=None, verbose=True):\n if nproc is None:\n nproc = multiprocessing.cpu_count()\n sh_filename = '_run_parallel_' + hashlib.md5('\\n'.join(cmdL).encode('utf-8')).hexdigest()\n with open(sh_filename, 'wt') as f:\n f.write('\\n'.join(cmdL))\n out = subprocess.check_output('parallel -j%d %s--keep-order < %s' % 
(nproc, '--verbose ' if verbose else '', sh_filename), shell=True)\n out = out.decode('utf-8')\n if verbose:\n print('-'*80)\n print('system_parallel output:')\n print('-'*80)\n print(out)\n os.remove(sh_filename)\n return out", "def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 # total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return", "def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n 
for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)", "def submit_spot_batch_job( argv ):\n import logging.config\n if len(sys.argv) == 1:\n print 'ERROR: Missing log configuration file, first argument must be path/name.ext of the log configuration file'\n sys.exit(8)\n logging.config.fileConfig( sys.argv[1], disable_existing_loggers=False)\n logger = logging.getLogger(__name__)\n \n if len(sys.argv) == 2:\n logger.error( 'ERROR: Missing Batch Job Parm file, second argument must be path/name.ext of the log Batch Job Parm file' )\n sys.exit(8) \n \n try:\n logger.info(\"Starting\")\n \n path_batch_job_parm_file = sys.argv[2]\n if len(sys.argv) == 4: path_user_job_parm_file = sys.argv[3]\n else: path_user_job_parm_file = None\n \n with open( path_batch_job_parm_file ) as parm_file:\n raw_batch_job_parm_item = parm_file.read()\n \n if path_user_job_parm_file != None: \n with open( path_user_job_parm_file ) as parm_file:\n raw_user_job_parm_item = parm_file.read()\n else: raw_user_job_parm_item = None\n\n batch_job_parm_item = BatchJobParmItem( stringParmFile=raw_batch_job_parm_item )\n\n spot_master_sqs_message_durable = SqsMessageDurable( awsspotbatch.common.const.SPOT_MASTER_QUEUE_NAME, \n batch_job_parm_item.primary_region_name, \n profile_name=batch_job_parm_item.profile_name )\n \n spot_master_uuid = str(uuid.uuid1())\n logger.info('Submitting test batch message, spot_master_uuid=' + spot_master_uuid )\n spot_master_msg = SpotMasterMsg( spot_master_uuid=spot_master_uuid, spot_master_msg_type=SpotMasterMsg.TYPE_SUBMIT_BATCH,\n raw_batch_job_parm_item=raw_batch_job_parm_item, raw_user_job_parm_item=raw_user_job_parm_item)\n message_attributes = create_microsvc_message_attributes( awsspotbatch.common.const.MICROSVC_MASTER_CLASSNAME_SpotMasterMessageSubmitBatch )\n spot_master_sqs_message_durable.send_message( spot_master_msg.to_json(),\n message_attributes=message_attributes )\n logger.info( 'Completed Successfully' )\n\n except StandardError as e:\n logger.error( e )\n logger.error( traceback.format_exc() )\n sys.exit(8)", "def _log(self, lvl, msg):\n log.log(lvl, \"Proc[{0}] : {1}\".format(self.name, msg))", "def main(log: str = \"WARNING\", log_file: Optional[str] = None):\n log_level = getattr(logging, log.upper(), None)\n if not isinstance(log_level, int):\n raise ValueError(f\"Invalid log level `{log}`\")\n logging.basicConfig(level=log_level, filename=log_file)\n # ~\\~ begin <<lit/mpi_oscillator.md|example-mpi-main>>[init]\n initialize()\n client = Client()\n # ~\\~ end\n # ~\\~ begin <<lit/mpi_oscillator.md|example-mpi-main>>[1]\n OMEGA0 = 1.0\n ZETA = 0.5\n H = 0.001\n system = harmonic_oscillator(OMEGA0, ZETA)\n # ~\\~ end\n # ~\\~ begin <<lit/mpi_oscillator.md|example-mpi-main>>[2]\n y0 = np.array([1.0, 0.0])\n t = np.linspace(0.0, 15.0, 20)\n archive = Path(\"./output/euler\")\n underdamped_solution(OMEGA0, ZETA)(t)\n tabulate(Fine(archive, \"fine\", 0, system, H).solution, LiteralExpr(y0), t)\n\n # euler_files = archive.glob(\"*.h5\")\n # ~\\~ end\n # ~\\~ begin <<lit/mpi_oscillator.md|example-mpi-main>>[3]\n archive = Path(\"./output/parareal\")\n p = Parareal(\n client,\n lambda n: Coarse(n, 
system).solution,\n lambda n: Fine(archive, \"fine\", n, system, H).solution)\n jobs = p.schedule(LiteralExpr(y0), t)\n history = History(archive)\n p.wait(jobs, history.convergence_test)\n # ~\\~ end", "def batch_jobs(func, inputs, ncores=1, logger=None):\n\n if logger is None:\n logger = logging.getLogger()\n\n root_handlers = logging.root.handlers\n for handler in root_handlers:\n if \"baseFilename\" in handler.__dict__:\n logformat = handler.formatter._fmt\n level = handler.level\n logging.root.handlers = []\n\n pool = multiprocessing.Pool(\n ncores, initializer=initialize_mp_handler, initargs=(level, logformat)\n )\n\n output = zip(*pool.map(func, inputs))\n\n logging.root.handlers = root_handlers\n\n for i, fn in enumerate(glob.glob(\"./mp-handler-*.log\")):\n with open(fn) as f:\n logger.info(\"Log from thread {0}:\\n{1}\".format(i, f.read()))\n os.remove(fn)\n\n return output", "def run_info(self):\n return \"MPI: %d, OMP: %d\" % (self.mpi_procs, self.omp_threads)", "def _consolidate_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n with open(fn) as f:\n logger.info(\"Log from thread {0}:\\n{1}\".format(i, f.read()))\n open(fn, \"w\").write(\"\")", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def run_gypsum_multiprocessing(gypsum_log_path, gypsum_params,\n gypsum_timeout_limit):\n current_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n gypsum_dir = (\n str(current_dir) + os.sep + \"convert_files\" + os.sep + \"gypsum_dl\" + os.sep\n )\n gypsum_gypsum_dir = str(gypsum_dir) + os.sep + \"gypsum_dl\" + os.sep\n sys.path.extend([current_dir, gypsum_dir, gypsum_gypsum_dir])\n\n\n lig_id = gypsum_params[\"source\"].split(os.sep)[-1].replace(\".smi\", \"\")\n log_file = \"{}{}_log.txt\".format(gypsum_log_path, lig_id)\n\n try:\n with StdoutRedirection(log_file):\n func_timeout(gypsum_timeout_limit, prepare_molecules, args=(gypsum_params,))\n\n sys.stdout.flush()\n except:\n # This Ligand Timed out\n return lig_id\n\n # Check if it worked if it failed return lig_id if it works return None\n did_gypsum_complete = check_gypsum_log_did_complete(log_file)\n if did_gypsum_complete in [None, False]:\n # Failed to convert\n return lig_id\n\n return None", "def print_job(message):\n print(threading.currentThread().getName(), 'Starting')\n sleep(1)\n print('From within print_job : ' + str(message))\n print(threading.currentThread().getName(), 'Ending')", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def _run_parallel(parameters):\n\n # make parallel context global\n global pc\n\n print parameters\n # create parallel context instance\n pc = h.ParallelContext()\n\n print 'i am', pc.id(), 'of', pc.nhost()\n # start workers, begins an infinitely loop where master workers posts jobs and workers pull jobs until all jobs are finished\n pc.runworker()\n \n # print len(parameters)\n # # # 
distribute experiment and parameters to workers\n for param in parameters:\n # print len(parameters)\n # print param\n pc.submit(_f_parallel, param)\n # print param\n\n # # continue runnning until all workers are finished\n while pc.working():\n print pc.id(), 'is working'\n\n # # close parallel context \n pc.done()", "def main():\n flags = parser_create()\n config_data = config_loader_yaml(flags.config_file)\n loggers_config = get_loggers_config(config_data)\n logging_queue = multiprocessing.Queue()\n logging_worker = LoggingWorker(loggers_config, logging_queue)\n logging_worker.start()\n\n class_name = \"\"\n function_name = inspect.stack()[0][3]\n\n for i in range(5):\n log_message(logging_queue, 'DEBUG', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'INFO', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'WARNING', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'ERROR', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'CRITICAL', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'Unknown', __name__, class_name, function_name, 'Message ' + str(i))\n\n logging_queue.put(None)\n logging_worker.join()", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def batch_mc_production_check(\n dict_jobids_all_stages,\n log_directory,\n prod_id,\n prod_config_file,\n batch_config,\n logs_files,\n):\n debug_log = {}\n all_pipeline_jobs = []\n\n source_env = batch_config[\"source_environment\"]\n slurm_account = batch_config[\"slurm_account\"]\n\n for stage, jobids in dict_jobids_all_stages.items():\n all_pipeline_jobs.append(jobids)\n debug_log[f\"SUMMARY_{stage}\"] = jobids\n\n all_pipeline_jobs = \",\".join(all_pipeline_jobs)\n\n # Copy lstmcpipe config used to log directory\n shutil.copyfile(Path(prod_config_file).resolve(), log_directory.joinpath(f\"config_MC_prod_{prod_id}.yml\"))\n\n # Save machine info into the check file\n check_prod_file = log_directory.joinpath(f\"check_MC_{prod_id}.txt\").absolute().as_posix()\n\n cmd_wrap = f\"touch {check_prod_file}; \"\n cmd_wrap += (\n f\"sacct --format=jobid,jobname,nodelist,cputime,state,exitcode,avediskread,maxdiskread,avediskwrite,\"\n f\"maxdiskwrite,AveVMSize,MaxVMSize,avecpufreq,reqmem -j {all_pipeline_jobs} >> {check_prod_file}; \"\n f\"mv slurm-* IRFFITSWriter.provenance.log {log_directory.absolute().as_posix()} \"\n )\n\n batch_cmd = \"sbatch -p short --parsable\"\n if slurm_account != \"\":\n batch_cmd += f\" -A {slurm_account}\"\n batch_cmd += f\" --dependency=afterok:{all_pipeline_jobs} -J prod_check\" f' --wrap=\"{source_env} {cmd_wrap}\"'\n\n jobid = os.popen(batch_cmd).read().strip(\"\\n\")\n log.info(f\"Submitted batch CHECK-job {jobid}\")\n debug_log[f\"prod_check_{jobid}\"] = batch_cmd\n\n save_log_to_file(debug_log, logs_files[\"debug_file\"], workflow_step=\"check_full_workflow\")\n\n return jobid", "def 
main():\n args = parse_args()\n level = logging.INFO\n if args.quiet:\n level = logging.ERROR\n if args.debug:\n level = logging.DEBUG\n LOG.setLevel(level)\n\n log_handler = logging.handlers.RotatingFileHandler(\n args.logfile, maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACKUP_COUNT)\n OUTPUT.addHandler(log_handler)\n log_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt=DATE_FORMAT))\n\n prefix, ext = os.path.splitext(args.logfile)\n error_log_handler = logging.handlers.RotatingFileHandler(\n prefix + '-error' + ext, maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACKUP_COUNT)\n error_log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt=DATE_FORMAT))\n error_log_handler.setLevel(logging.WARNING)\n LOG.addHandler(error_log_handler)\n\n processors = []\n LOG.debug(\"Initializing processor objects\")\n for host in args.hosts:\n try:\n processors.append(Processor(host))\n except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as error:\n LOG.warning(\"Ignoring %s due to %s\", host, error)\n continue\n if len(args.hosts) != len(processors):\n LOG.error(\"%d host(s) will be ignored for this session.\", len(args.hosts) - len(processors))\n\n OUTPUT.info(\"(Process started)\")\n while True:\n try:\n for proc in processors:\n proc.sample()\n proc.compare()\n except (KeyboardInterrupt, SystemExit) as error:\n OUTPUT.info(\"(Process exiting)\")\n sys.exit(0)\n LOG.debug(\"Sleeping %s %0.1f\", args.interval)\n time.sleep(args.interval)" ]
[ "0.6007545", "0.5688902", "0.55962497", "0.559282", "0.55060256", "0.54812354", "0.5468144", "0.5374238", "0.5281202", "0.5265701", "0.5239216", "0.51966226", "0.51679915", "0.51659477", "0.51620716", "0.5127934", "0.511845", "0.510981", "0.51014036", "0.50684947", "0.5053366", "0.50384694", "0.5026759", "0.502039", "0.5014567", "0.50070596", "0.50025403", "0.50012964", "0.4999224", "0.49975932" ]
0.61517555
0
Capture details about the view_func that is about to execute
def process_view(self, request, view_func, view_args, view_kwargs):
    if not scout_config.value("monitor"):
        return

    tracked_request = TrackedRequest.instance()
    tracked_request.is_real_request = True

    track_request_view_data(request, tracked_request)

    span = tracked_request.current_span()
    if span is not None:
        span.operation = get_operation_name(request)
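TrackedRequest, scout_config and the helper functions above are Scout APM internals. A hedged illustration of the same Django hook - a bare middleware whose process_view records which view is about to execute - might look like this (the attribute name is an assumption):

class ViewNameMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        return self.get_response(request)

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Stash the dotted path of the view about to run for later logging
        request.view_name = "{}.{}".format(view_func.__module__, view_func.__name__)
        return None  # continue normal request handling

# Registered by adding "path.to.ViewNameMiddleware" to settings.MIDDLEWARE.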
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_exec_time(view_func):\r\n @wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n t1 = time.time()\r\n back_view = view_func(request, *args, **kwargs)\r\n print request.get_full_path(), time.time() - t1\r\n return back_view\r\n return _wrapped_view_func", "def print_trace(view_func):\r\n @wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n try:\r\n return view_func(request, *args, **kwargs)\r\n except:\r\n import traceback\r\n traceback.print_exc()\r\n return _wrapped_view_func", "def process_view(self, request, view_func, view_args, view_kwargs):\n\n # Get undecorated function for require_login decorator\n if (isinstance(view_func, _CheckLogin)):\n view_func = view_func.view_func\n\n func_name = '.'.join((view_func.__module__, view_func.func_name))\n func_args = [','.join(view_args)]\n if func_args[0]:\n func_args.append(', ')\n func_args.append(','.join(\n [\"%s=%s\"%(k, v) for k, v in view_kwargs.items()]))\n LogDB(event_type='HR',\n info=\"Request to %s proceed by %s (%s)\"%(request.path,\n func_name, ''.join(func_args))\n ).save()\n return None", "def process_view(self, request, view_func, *args, **kwargs):\n # Do not trace if the url is blacklisted\n if utils.disable_tracing_url(request.path, self._blacklist_paths):\n return\n\n try:\n # Get the current span and set the span name to the current\n # function name of the request.\n tracer = _get_current_tracer()\n span = tracer.current_span()\n span.name = utils.get_func_name(view_func)\n except Exception: # pragma: NO COVER\n log.error('Failed to trace request', exc_info=True)", "def view_function(self,v):\n return v", "def process_view(self, request, view_func, *view_args, **view_kwargs):\n # Nothing to do when not demo mode.\n if not settings.DEMO_MODE:\n return None\n\n if view_func in self.safe_views:\n return None # continue handling request\n return HttpResponseForbidden()", "def _wrapped_view(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)", "def profile_view(self, view):\n def profiled(request, *args, **kwargs):\n t1 = time.clock()\n response = view(request, *args, **kwargs)\n t2 = time.clock()\n log = lambda *args: logger.debug(u\"\".join(map(unicode, args)))\n log(\"profiled view:\\t\\t\", view.__name__)\n log(\"url:\\t\\t\\t\", request.get_full_path())\n log(\"subdomain:\\t\\t\", request.subdomain)\n log(\"get:\\t\\t\\t\", u\"\\n\\t\\t\\t\".join(\n u\"{0} => {1}\".format(k, request.GET.getlist(k))\n for k in request.GET))\n log(\"post:\\t\\t\\t\", u\"\\n\\t\\t\\t\".join(\n u\"{0} => {1}\".format(k, request.POST.getlist(k))\n for k in request.POST))\n log(\"arguments:\\t\\t\", args)\n log(\"named arguments:\\t\", kwargs)\n log(\"execution time:\\t\\t\", t2 - t1)\n log(\"query number:\\t\\t\", len(connection.queries))\n return response\n return wraps(view)(profiled)", "def process_view(self, request, view_func, view_args, view_kwargs):\n # Url exception(s)\n \n for exc_pattern in settings.SMART_ACCESS_CONTROL_EXCEPTIONS:\n if re.match(exc_pattern, request.path):\n return None\n\n if hasattr(view_func, 'resolve'):\n view_func = view_func.resolve(request)\n\n try:\n if view_func:\n permission_set = self.get_permset(request)\n\n # given a set of permissions, and a rule for access checking\n # apply the rules to the permission set with the current request parameters\n# import rpdb2\n# rpdb2.start_embedded_debugger(\"a\")\n if permission_set:\n if permission_set.evaluate(request, 
view_func, view_args, view_kwargs):\n print \"And permitted for \", view_func.__name__, request.principal\n return None\n print \"Permission denied for \", view_func.__name__, request.principal\n\n # otherwise, this will fail\n except:\n print \"Exception: Permission denied for \", view_func.__name__, request.principal\n import sys, traceback\n traceback.print_exc(file=sys.stderr)\n raise PermissionDenied\n raise PermissionDenied", "def __call__(request):", "def __init__( viewname, view ):", "def local_views():\n\tpass", "def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')", "def add_view( *args, **kwargs ):", "def view_output(view):\n\n @functools.wraps(view)\n def wrapper():\n output = io.StringIO()\n try:\n view(output)\n print(\"\", file=output)\n except Exception as error:\n print(\"\", file=output)\n if hasattr(error, \"stack\"):\n print(error.stack, file=output)\n else:\n traceback.print_exception(*sys.exc_info(), file=output)\n return output.getvalue()\n\n return wrapper", "def generate_viewfunc(final_viewfunc, middlewares):\n accepted_kwargs = []\n for param in inspect.signature(final_viewfunc).parameters.values():\n if param.kind == param.POSITIONAL_ONLY:\n raise ValueError(\"%s expects positional argument %s\"\n % (final_viewfunc, param.name))\n elif param.kind == param.VAR_POSITIONAL:\n raise ValueError(\"%s expects var-positional argument %s\"\n % (final_viewfunc, param.name))\n elif param.kind == param.VAR_KEYWORD:\n raise ValueError(\"%s expects var-keyword argument %s\"\n % (final_viewfunc, param.name))\n\n accepted_kwargs.append(param.name)\n\n wants_ldap = 'ldap' in accepted_kwargs\n\n def caller():\n kwargs = {\n \"log\": APP.logger,\n }\n\n for middleware in middlewares:\n output = middleware.request_infos()\n if output:\n APP.logger.debug(\"Middleware %s generated kwargs: %s\",\n middleware,\n output)\n kwargs.update(output)\n\n result = middleware.intermediate_viewfunc()\n if result is not None:\n APP.logger.debug(\"Middleware %s returned: %s\",\n middleware,\n result)\n return result\n\n # Build the LDAP client\n if wants_ldap:\n ldap_client = LdapClient(\n APP.logger,\n kwargs.get('user_tokeninfo'),\n kwargs.get('client_info'),\n )\n kwargs['ldap'] = ldap_client\n\n APP.logger.debug(\"Got args %s for viewfunc %s pre-filter\",\n kwargs,\n final_viewfunc)\n\n kwargs = {key: kwargs[key] for key in kwargs if key in accepted_kwargs}\n\n APP.logger.debug(\"Calling final viewfunc %s with args %s\",\n final_viewfunc,\n kwargs)\n res = final_viewfunc(**kwargs)\n resp = {}\n if isinstance(res, tuple):\n res, resp = res\n resp['result'] = res\n\n headers = {\n \"Content-Security-Policy\": \"default-src: 'none'\",\n \"Feature-Policy\": \"\",\n \"X-Frame-Options\": \"DENY\",\n \"X-Xss-Protection\": \"1; mode=block\",\n \"X-Content-Type-Options\": \"nosniff\",\n \"Referrer-Policy\": \"no-referrer\",\n }\n for middleware in middlewares:\n new_resp = middleware.manipulate_response(resp, kwargs)\n extra_headers = None\n if new_resp and isinstance(new_resp, tuple):\n new_resp, extra_headers = new_resp\n if new_resp is not None:\n APP.logger.debug(\"Middleware %s manipulated response\",\n middleware)\n resp = new_resp\n if extra_headers is not None:\n APP.logger.debug(\"Middleware %s added headers: %s\",\n middleware, extra_headers)\n headers.update(extra_headers)\n\n return resp, headers\n return caller", "def run_view(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"\", perms = \"View\")", "def process_view(self, 
request, view_func, view_args, view_kwargs):\n if view_func.__name__ == settings.COUNTER_ADS_VIEW:\n counter_key = view_kwargs.get(\"pk\")\n ads_counter = Counter(view_func.__name__, counter_key)\n ads_counter.hit(request)", "def view(self):", "def method_name(self) -> str:\n if isinstance(self.view_func, str):\n return self.view_func\n return self.view_func.__name__", "def get_viewfunc(self):\n # Generate some common middlewares\n if self.user_auth is not False:\n self.middlewares.append(UserAuthMiddleware(self.user_auth))\n\n if self.client_auth is not False:\n self.middlewares.append(ClientAuthMiddleware())\n\n if self.arguments:\n self.middlewares.append(ArgumentMiddleware(self.arguments))\n\n if self.paged:\n self.middlewares.append(PagingMiddleware())\n\n # Return the viewfunc, wrapped with requested middlewares\n return generate_viewfunc(self.viewfunc, self.middlewares)", "def user_view_for(anon_view_func):\n\tdef decorator(view):\n\t\tanon_view_func.user_func = view\n\t\treturn view\n\treturn decorator", "def getViews(read):\n ...", "def _view(self, request, **kwargs):\n return self._dispatch(request, **kwargs)", "def get_view_function(key):\n return lambda: \"OK\" if SERVICES[key].restart() is None else \"OK2\"", "def do_view_data(self, *args):\n with suppress(SystemExit):\n if str(*args).split(' ')[0] == '':\n command = self.cli.view_parser.parse_args(*args)\n else:\n command = self.cli.view_parser.parse_args(str(*args).split(' '))\n command.func(**vars(command))", "def some(func):\n def wrapper(* args,** kwargs):\n logging.basicConfig(filename='error.log',level=logging.DEBUG)\n logging.info(request.url + \" : \" + str(request.remote_addr)+\" using function \"+func.__name__ )\n return func(* args,** kwargs)\n\n wrapper.__name__ = func.__name__ \n return wrapper", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n dao = DataStreamDBDAO()\n query, total_resources = dao.query(account_id=request.account.id, language=request.user.language)\n if total_resources == 0 or request.GET.get('test-no-dataviews', False) == '1':\n raise AnyDatastreamRequiredException()\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view", "def view(*args, **kwargs):\n return mapped_method()", "def do_show(self, args):\n\n func = getattr(args, \"func\", None)\n\n if func is not None:\n func(self, args)\n else:\n self.do_help(\"show\")" ]
[ "0.7136872", "0.698362", "0.69041514", "0.6508647", "0.6270444", "0.62283134", "0.62076545", "0.609943", "0.6065147", "0.5921364", "0.5820853", "0.5762659", "0.57506853", "0.5735391", "0.5725252", "0.5722089", "0.5718177", "0.56821394", "0.5678318", "0.5665546", "0.56598353", "0.56309414", "0.56289166", "0.5623426", "0.558923", "0.55787855", "0.55540526", "0.5551752", "0.55445313", "0.5544526" ]
0.70100695
1
Adds a new ToDo to the list
def add_todo():
    task = flask.request.form["task"]
    todos.append(ToDo(task))
    return "success"
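The snippet above assumes a surrounding Flask application plus module-level todos and ToDo objects; a minimal runnable context (route path and ToDo definition are assumptions) would be:

import flask

app = flask.Flask(__name__)
todos = []

class ToDo:
    def __init__(self, task):
        self.task = task

@app.route("/add", methods=["POST"])
def add_todo():
    task = flask.request.form["task"]
    todos.append(ToDo(task))
    return "success"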
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_items(list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def test_can_add_todo_list():\n scheduler = Scheduler()\n new_id = uuid.uuid4()\n\n scheduler.add_todo_list(new_id, \"my todo list\")\n\n Is(scheduler.get_amount_of_todo_lists()).not_none.integer.has_same_truth_of(1)", "def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def add(request):\n\tif request.method == 'GET':\n\t\tID = 
request.GET.get('id',False)\n\t\tstatus = request.GET.get('status',False)\n\t\ttaskname = request.GET.get('taskname',False)\n\t\tdescription = request.GET.get('description','')\n\n\t\tprint(taskname)\n\t\terror = {}\n\t\tif not ID:\n\t\t\terror['error'] = \"id not given\"\n\t\telif not status:\n\t\t\terror['error'] = \"status not given\"\n\t\telif not taskname:\n\t\t\terror['error'] = \"taskname not given\"\n\t\telif not description:\n\t\t\terror['error'] = \"description not given\"\n\t\telse:\n\t\t\ttodo['task'].append({\"id\":ID,\"status\":status,\"taskname\":taskname,\"description\":description})\n\n\t\tif len(error) != 0:\n\t\t\tresponse = error\n\t\telse:\n\t\t\tresponse = todo['task'][-1]\n\n\treturn JsonResponse(response)", "def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def add():\n\ttry:\n\t task = sys.argv[2]\n\t file = open(\"todo.txt\", \"a\")\n\t file.write(task + \"\\n\")\n\t print('Added todo: \"{}\"'.format(task))\n\texcept IndexError:\n\t print(\"Error: Missing todo string. Nothing added!\")", "def test_add(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. 
Adding failed.\")", "def add_todo(taskname, deadline, priority, reminder, deleteflag):\n autodel()\n task = {\n 'name': taskname,\n 'deadline': str(deadline),\n 'priority': priority,\n 'reminder': reminder,\n 'no_del': deleteflag\n }\n\n if not exists(task['name']):\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(task) + '\\n'\n todo.write(jdump)\n return 0\n except json.decoder.JSONDecodeError:\n return 1", "def add_item(self):\n\n self.todo_scroll_cell.add_item(f'{self.new_todo_textbox.get()}')", "def create_list(self, name) -> TodoList:\n t = TodoList(name, [])\n if name in self.lists:\n raise HTTPException(409, f\"TODO list with name {name} already exists\")\n self.lists[self.__to_key(name)] = t\n return t", "def todo(self, todo_id):\r\n return tdl.Todo(self, todo_id)", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def post_add_todo_request(self):\n response = requests.post(\n url=self.url,\n headers=self.default_headers,\n json=self.habitica_todo.to_json_dict()\n )\n return get_data_or_exit(response)", "def add_list(request) -> HttpResponse:\n\n # Only staffers can add lists, regardless of TODO_STAFF_USER setting.\n if not request.user.is_staff:\n raise PermissionDenied\n\n if request.POST:\n form = AddTaskListForm(request.user, request.POST)\n if form.is_valid():\n try:\n newlist = form.save(commit=False)\n newlist.slug = slugify(newlist.name, allow_unicode=True)\n newlist.save()\n messages.success(request, \"A new list has been added.\")\n return redirect(\"todo:lists\")\n\n except IntegrityError:\n messages.warning(\n request,\n \"There was a problem saving the new list. \"\n \"Most likely a list with the same name in the same group already exists.\",\n )\n else:\n if request.user.groups.all().count() == 1:\n # FIXME: Assuming first of user's groups here; better to prompt for group\n form = AddTaskListForm(request.user, initial={\"group\": request.user.groups.all()[0]})\n else:\n form = AddTaskListForm(request.user)\n\n context = {\"form\": form}\n\n return render(request, \"todo/add_list.html\", context)", "def create_task(request):\n all_task_list = Todo.objects.all()\n form = TaskForm()\n if request.method == 'POST':\n form = TaskForm(request.POST)\n if form.is_valid():\n # create default todolist\n user = request.user if request.user.is_authenticated else None\n task = Todo(\n description=request.POST['description'],\n content= request.POST['content'],\n tesk_medium= request.POST['tesk_medium'],\n creator=user\n )\n task.save()\n return redirect('lists:alllist')\n else:\n return render(request, 'lists/index.html', {'form': form})\n\n context = {\n 'form': form, \n 'taskli':all_task_list\n }\n return render(request, 'lists/create_task.html',context )", "def test_adding_many_todos(self):\n event = Event.objects.filter(slug__endswith=\"-upcoming\") \\\n .order_by(\"-pk\")[0]\n event.end = event.start + datetime.timedelta(days=2)\n event.save()\n\n # check if the event has 0 todos\n assert event.todoitem_set.all().count() == 0\n\n # add standard todos\n ident = event.get_ident()\n url, form = self._get_initial_form('todos_add', ident)\n\n # fix: turn Nones into empty strings\n for key, value in form.items():\n if value is None:\n form[key] = ''\n\n rv = self.client.post(reverse('todos_add', args=[ident]), form)\n\n # let's check if the form passes\n assert rv.status_code == 302\n\n # finally let's check there are 
some new todos\n assert event.todoitem_set.all().count() == 9", "def create_todo():\n payload = request.json\n\n todo = Todo(task=payload['task'], user_id=payload['user_id'])\n\n database.session.add(todo)\n database.session.commit()\n\n return jsonify(todo.to_dict()), 201", "def add(request):\n\n context = {}\n forms = TodoForm()\n if request.method == \"POST\":\n forms = TodoForm(request.POST or None)\n if forms.is_valid():\n # If form is valid then create the todo\n forms = forms.save()\n # Update the user of new created user to be the logged in user\n forms.user = request.user\n # Save updated form\n forms.save()\n # Redirect to the home page\n return redirect(\"index\")\n \n context['form'] = forms\n return render(request, \"todos/add.html\", context)", "def todo_added(name, description):", "def do_item_add(self, arg):\n try:\n add_item = arg[\"<item_name>\"]\n add_item_str = \" \".join(add_item)\n app.ToDoApp.to_add_item(add_item_str, add_item = True)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def add_task(request):\n data = {\"success\": False}\n try:\n title = request.POST.get(\"title\")\n status = request.POST.get(\"status\")\n desc = request.POST.get(\"desc\")\n tasklist = request.POST.get(\"tasklist\")\n\n todolist_obj = TodoList.objects.get(title=tasklist)\n user = request.user\n todo = Todo.objects.filter(title=title).count()\n if todo == 0:\n todo_obj = Todo.objects.create(creator=user,\n todolist=todolist_obj,\n description=desc,\n status=status,\n title=title)\n todo_obj.save()\n data[\"message\"] = \"Data Saved\"\n data[\"success\"] = True\n else:\n raise Exception(\"Task with same title already exists\")\n except Exception as ex:\n data[\"message\"] = \"Failed to save data [%s]\" % ex\n finally:\n return JsonResponse(data)", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')" ]
[ "0.7692403", "0.75500757", "0.75385946", "0.7229673", "0.7222644", "0.7201016", "0.71989423", "0.71025795", "0.70856917", "0.6990269", "0.68904996", "0.6773006", "0.6752873", "0.6706633", "0.664904", "0.661715", "0.6607402", "0.6585516", "0.6567563", "0.65658355", "0.65384394", "0.65118754", "0.6483912", "0.6453466", "0.64510745", "0.6439344", "0.6425721", "0.6424563", "0.64165545", "0.6241668" ]
0.8164502
0
Create a list of default pip requirements for MLflow Models. Returns a list of default pip requirements for MLflow Models produced by this flavor.
def get_default_pip_requirements(include_cloudpickle=False): _check_soft_dependencies("mlflow", severity="error") from mlflow.utils.requirements_utils import _get_pinned_requirement pip_deps = [_get_pinned_requirement("sktime")] if include_cloudpickle: pip_deps += [_get_pinned_requirement("cloudpickle")] return pip_deps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def python_requirements(self):\n try:\n dist = self.requirement.pip_requirement.get_dist()\n extras = self.requirement.pip_requirement.extras\n requirements = list(dist.requires(extras))\n except Exception:\n logger.warning(\"Failed to determine installation requirements of %s \"\n \"using pkg-resources, falling back to old implementation.\",\n self, exc_info=True)\n requirements = self.python_requirements_fallback\n logger.debug(\"Python requirements of %s: %r\", self, requirements)\n return requirements", "def required_packages(cls) -> List[Text]:\n return []", "def install_deps():\n default = open('requirements.txt', 'r').readlines()\n new_pkgs = []\n links = []\n for resource in default:\n if 'git+https' in resource:\n pkg = resource.split('#')[-1]\n links.append(resource.strip())\n new_pkgs.append(pkg.replace('egg=', '').rstrip())\n else:\n new_pkgs.append(resource.strip())\n return new_pkgs, links", "def install_requires():\n return reqs(\"requirements.txt\")", "def install_requires():\n return reqs('requirements.txt')", "def install_requires():\n skip_install_requires = environ.get('SKIP_INSTALL_REQUIRES')\n if not skip_install_requires:\n with open('requirements.pip') as r:\n return r.readlines()\n return []", "def get_install_requires() -> List[str]:\n return [\n \n ]", "def requirements(self):\n requirements = []\n return requirements", "def _python_dependencies(self):\n dependencies = []\n if self._requires_extensions():\n self._inject_extensions_build(dependencies)\n dependencies.append('- task: UsePythonVersion@0')\n dependencies.append(' displayName: \"Setting python version to 3.6 as required by functions\"')\n dependencies.append(' inputs:')\n dependencies.append(' versionSpec: \\'3.6\\'')\n dependencies.append(' architecture: \\'x64\\'')\n dependencies.append('- script: |')\n dependencies.append(' python3.6 -m venv worker_venv')\n dependencies.append(' source worker_venv/bin/activate')\n dependencies.append(' pip3.6 install setuptools')\n if self._requires_pip():\n dependencies.append(' pip3.6 install -r requirements.txt')\n return dependencies", "def requires(self):\n return []", "def pip_requirements():\n\n require(\n \"virtualenv_path\",\n \"requirements_path\",\n \"http_proxy\",\n \"https_proxy\",\n \"sudo_user\",\n )\n cmd = \"pip install --quiet --requirement %s\" % env.requirements_path\n\n # append packages url if specified\n if env.get(\"packages_url\") is not None:\n cmd += \" -f %s\" % env.get(\"packages_url\")\n\n with context_managers.proxy(env.http_proxy, env.https_proxy):\n with context_managers.virtualenv(env.virtualenv_path):\n sudo(cmd, user=env.sudo_user)", "def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. 
fabric_factory/ve/bin/activate; pip install -r requirements.txt')", "def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')", "def requires():\n install_reqs = parse_requirements(join(CWD, 'requirements', 'base.txt'),\n session=False)\n return [str(ir.req) for ir in install_reqs]", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def _system_requirement_tools(self, app: AppConfig):\n if app.target_vendor_base == DEBIAN:\n base_system_packages = [\"python3-dev\", \"build-essential\"]\n system_verify = [\"dpkg\", \"-s\"]\n system_installer = \"apt\"\n elif app.target_vendor_base == RHEL:\n base_system_packages = [\n \"python3-devel\",\n \"gcc\",\n \"make\",\n \"pkgconf-pkg-config\",\n ]\n system_verify = [\"rpm\", \"-q\"]\n system_installer = \"dnf\"\n else:\n base_system_packages = None\n system_verify = None\n system_installer = None\n\n return base_system_packages, system_verify, system_installer", "def filter_working_set_soft(working_set, requirements):\n\n unmet_requirements = []\n\n retval = pkg_resources.WorkingSet([])\n\n for req in requirements:\n try:\n dists = working_set.require(req)\n for dist in dists: retval.add(dist)\n except:\n unmet_requirements.append(req)\n\n return retval, unmet_requirements", "def install_deps():\n with open('requirements.txt', 'r') as f:\n packages = f.readlines()\n new_pkgs = []\n for resource in packages:\n new_pkgs.append(resource.strip())\n return new_pkgs", "def initial_dependencies(self) -> List[str]:\n return self.options[\"general\"][\"dependencies\"]", "def freeze():\n proc = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)\n with open('requirements.txt', 'wb') as fout:\n fout.write(proc.stdout)", "def get_install_requires():\n requirements = []\n for line in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):\n continue\n # add line to requirements\n requirements.append(line)\n return requirements", "def getRequirements():\n\n \n cudaLibsOk = checkCUDAisAvailable() \n \n conditionalRequirements = []\n if cudaLibsOk:\n conditionalRequirements += [\"tensorflow-gpu==1.15.3\", ]\n else:\n print(\"\\n CUDA it's not available in your machine.\")\n print(\" You won't be able to use the GPU support.\\n\")\n #if olderPip or olderSetuptools:\n #tfRequirement = \"tensorflow==1.15.0\"\n #else:\n tfRequirement = \"tensorflow==1.15.3\"\n \n conditionalRequirements += [tfRequirement]\n\n return conditionalRequirements", "def requirements(self, context):\n\n requirements = []\n\n # Get all the tasks and the lists (so the .fill on lists are also\n # considered.)\n all_tasks = list(self.tasks) + list(flatten(self.tasks, context))\n for task in all_tasks:\n task_details = getattr(task, '__garcon__', None)\n if task_details:\n requirements += task_details.get('requirements', [])\n else:\n raise NoRunnerRequirementsFound()\n return set(requirements)", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n 
install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def _freeze(requirements, python):\n output = []\n try:\n version_out = subprocess.check_output(\n [python, \"--version\"], stderr=subprocess.STDOUT)\n output.append(version_out)\n version_all = version_out.decode('utf-8').split()[1]\n version = '.'.join(version_all.split('.')[:2])\n with fixtures.TempDir() as temp:\n output.append(subprocess.check_output(\n [python, '-m', 'venv', temp.path]))\n pip_bin = os.path.join(temp.path, 'bin', 'pip')\n output.append(subprocess.check_output(\n [pip_bin, 'install', '-U', 'pip', 'setuptools', 'wheel']))\n output.append(subprocess.check_output(\n [pip_bin, 'install', '-r', requirements]))\n freeze = subprocess.check_output(\n [pip_bin, 'freeze'])\n output.append(freeze)\n return (version, _parse_freeze(freeze.decode('utf-8')))\n except Exception as exc:\n if isinstance(exc, subprocess.CalledProcessError):\n output.append(exc.output)\n raise Exception(\n \"Failed to generate freeze: %s %s\"\n % (b'\\n'.join(output).decode('utf-8'), exc))", "def get_dependencies(self):\n dependencies = self._dependencies\n if self.ansible is not None:\n dependencies.append(\"ansible=={}.*\".format(self.ansible))\n else:\n dependencies.append(\"ansible\")\n # Drivers can have their own dependencies\n if self.scenario.driver is not None \\\n and self.scenario.driver in DRIVER_DEPENDENCIES.keys():\n dependencies.extend(DRIVER_DEPENDENCIES[self.scenario.driver])\n # Scenarios can specify a requirements.txt\n if self.scenario.requirements is not None:\n dependencies.append(\"-r\" + self.scenario.requirements)\n return dependencies", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def requirements(context):\n pip_compile = \"pip-compile --annotate --quiet\"\n\n command = (\n f\"{pip_compile} requirements/base.in \"\n f\"&& {pip_compile} requirements/local.in \"\n f\"&& {pip_compile} requirements/production.in\"\n )\n command = f\"run --rm django bash -c '{command}'\"\n run_command(context, get_local_user(), False, None, None, command)", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")" ]
[ "0.67231864", "0.6240935", "0.61417305", "0.6106975", "0.6102336", "0.6072006", "0.6058914", "0.5995286", "0.5991597", "0.58851796", "0.58757514", "0.58436614", "0.58049595", "0.57882637", "0.57187045", "0.56341326", "0.5592187", "0.5590501", "0.55732214", "0.55626416", "0.553857", "0.55132204", "0.5504328", "0.55013984", "0.54835516", "0.5476323", "0.54756945", "0.5454257", "0.5435499", "0.5435499" ]
0.78039926
0
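The get_default_pip_requirements helper shown in the row above is normally consumed when assembling the pip environment of a logged model. A minimal usage sketch follows, assuming sktime (exposing the sktime.utils.mlflow_sktime module referenced in these rows) and mlflow are both installed; the include_cloudpickle flag and the returned pinned-requirement list come from the function body above, everything else is illustrative.

    # Minimal sketch: collect the default pinned pip requirements for a sktime model.
    from sktime.utils import mlflow_sktime

    # Pass include_cloudpickle=True only when the cloudpickle serialization format is used.
    pip_reqs = mlflow_sktime.get_default_pip_requirements(include_cloudpickle=True)
    print(pip_reqs)  # e.g. pinned 'sktime' and 'cloudpickle' requirement strings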
Return the default Conda environment for MLflow Models. Returns the default Conda environment for MLflow Models produced by calls to save_model() and log_model().
def get_default_conda_env(include_cloudpickle=False): _check_soft_dependencies("mlflow", severity="error") from mlflow.utils.environment import _mlflow_conda_env return _mlflow_conda_env( additional_pip_deps=get_default_pip_requirements(include_cloudpickle) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_environment():\n # Search for the environment variable set by the hutch python setup\n env = os.getenv('CONDA_ENVNAME')\n # Otherwise look for built-in Conda environment variables\n if not env:\n env = os.getenv('CONDA_DEFAULT_ENV')\n # Check the top level PYTHONPATH to see if we have packages installed in\n # development mode\n dev = os.getenv('PYTHONPATH')\n if dev:\n try:\n dev_pkgs = os.listdir(dev)\n except FileNotFoundError:\n logger.debug(\"No dev folder found\")\n dev_pkgs = list()\n else:\n dev_pkgs = list()\n return env, dev_pkgs", "def get_conda_env_name():\n env_name = os.popen('echo $CONDA_DEFAULT_ENV').read().strip()\n if env_name == '' or env_name == '$CONDA_DEFAULT_ENV':\n env_name = 'base'\n logging.info('Anaconda environment: ' + env_name)\n return env_name", "def get_default_environment(cwd=None):\n\n # NOTE(dittrich): I know this code has multiple return points\n # but it is simpler and easier to understand this way.\n #\n # Highest priority is inhereted environment variable.\n environment = os.getenv('D2_ENVIRONMENT', None)\n if environment is not None:\n return environment\n #\n # Next is saved file in current working directory.\n if cwd is None:\n cwd = os.getcwd()\n local_default = get_saved_default_environment(cwd=cwd)\n if local_default not in ['', None]:\n return local_default\n #\n # Lowest priority is the directory path basename.\n return os.path.basename(cwd)", "def get_environment(self):\r\n return self.mcas[0].get_environment()", "def env(self) -> Optional[Env]:\n raise NotImplementedError", "def _get_environmentdef():\n if 'environmentdef' not in env:\n abort(\"Environment needs to be configured\")\n\n environmentdef = env.environmentdef\n\n # If we're running via `fab`, we should restrict the environment\n # to the current host.\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n\n return environmentdef", "def get_environment():\n return GenericGymEnv(id=\"real-time-gym-v1\", gym_kwargs={\"config\": CONFIG_DICT})", "def environment(self) -> Optional[pulumi.Input['EnvironmentArgs']]:\n return pulumi.get(self, \"environment\")", "def get_conda_ctk():\n is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))\n if not is_conda_env:\n return\n # Assume the existence of NVVM to imply cudatoolkit installed\n paths = find_lib('nvvm')\n if not paths:\n return\n # Use the directory name of the max path\n return os.path.dirname(max(paths))", "def get_saved_default_environment(cwd=None):\n env_file = get_local_default_file(cwd=cwd)\n saved_default = None\n if os.path.exists(env_file):\n with open(env_file, 'r') as f:\n saved_default = f.read().replace('\\n', '')\n return saved_default", "def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvVarArgs']]]]:\n return pulumi.get(self, \"env\")", "def get_env(self):\n self.airflow_cluster_name = conf.get('core', 'cluster')\n bicommon = BICommon()\n self.env_type = bicommon.env\n\n self.parameters.update({'airflow_cluster_name': self.airflow_cluster_name, 'env': self.env_type})", "def get_environ():\n # Manually set environment.\n if FLAGS.env is not None:\n return BasicEnvironment.from_json(open(FLAGS.env, \"r\").read())\n\n if FLAGS.data_folder is None:\n data_folder = FLAGS.dataset\n else:\n data_folder = FLAGS.data_folder\n exp_id = \"exp_\" + FLAGS.dataset + \"_\" + FLAGS.model\n if FLAGS.id is None:\n exp_id = gen_id(exp_id)\n else:\n exp_id = FLAGS.id\n return BasicEnvironment(\n device=get_device(FLAGS.gpu),\n dataset=FLAGS.dataset,\n 
data_folder=data_folder,\n logs_folder=FLAGS.logs,\n save_folder=FLAGS.results,\n run_validation=FLAGS.validation,\n verbose=FLAGS.verbose,\n exp_id=exp_id,\n description=FLAGS.description,\n valid_num_fold=FLAGS.valid_num_fold,\n valid_fold_id=FLAGS.valid_fold_id)", "def env(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"env\")", "def getEnvironment(self):\n pass", "def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvFromSourceArgs']]]]:\n return pulumi.get(self, \"env_from\")", "def make_env(env_name):\n \n env = gym.make(env_name) \n return env", "def env(self):\n return spack.schema.environment.parse(self.conf.get(\"environment\", {}))", "def default_environment():\n return dict(_VARS)", "def opencl_env() -> cldrive_env.OpenCLEnvironment:\n return cldrive_env.OclgrindOpenCLEnvironment()", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def env(self):\n return self._env", "def get_env(self, *args):\n m = module(*args)\n return m.env", "def get_default_config():\r\n config = {}\r\n\r\n config[\"kl_coeff\"] = 1.0\r\n config[\"_num_workers_tf\"] = 4\r\n config[\"use_gae\"] = True\r\n config[\"num_gpus\"] = 0\r\n\r\n config[\"_env_name_rllib\"] = \"multicomp\"\r\n config[\"_env_fcn\"] = create_env\r\n config['_policies'] = [None, \"from_scratch\", \"pretrained\"]\r\n config[\"_env\"] = {'with_video': False,\r\n \"SingleAgentToMultiAgent\": False,\r\n \"env_name\": \"multicomp/YouShallNotPassHumans-v0\"}\r\n config['framework'] = 'tfe'\r\n\r\n config['_train_policies'] = ['player_1']\r\n config['_call'] = {}\r\n config['_trainer'] = \"PPO\"\r\n config['_policy'] = \"PPO\"\r\n config['_call']['checkpoint_freq'] = 0\r\n config['_train_steps'] = 99999999\r\n config['_update_config'] = None\r\n config['_run_inline'] = False\r\n config['_postprocess'] = None\r\n\r\n config['num_envs_per_worker'] = 4\r\n config['_log_error'] = True\r\n config['_model_params'] = {\r\n \"use_lstm\": False,\r\n \"fcnet_hiddens\": [64, 64],\r\n # \"custom_action_dist\": \"DiagGaussian\",\r\n \"fcnet_activation\": \"tanh\",\r\n \"free_log_std\": True,\r\n }\r\n\r\n config['_select_policy'] = select_policy_default\r\n config['_get_policies'] = get_policies_default\r\n config['_do_not_train_policies'] = []\r\n config['_update_withpolicies'] = None\r\n config['callbacks'] = InfoCallbacks\r\n\r\n return config", "def environment(self):\n return self._environment", "def get_conda_root():\n try:\n # Fast-path\n # We're in the root environment\n conda_root = _import_conda_root()\n except ImportError:\n # We're not in the root environment.\n envs_dir = dirname(CONDA_PREFIX)\n if basename(envs_dir) == 'envs':\n # We're in a named environment: `conda create -n <name>`\n conda_root = dirname(envs_dir)\n else:\n # We're in an isolated environment: `conda create -p <path>`\n # The only way we can find out is by calling conda.\n conda_root = _conda_root_from_conda_info()\n\n return conda_root", "def conda_create_environment(name, python='3'):\n conda = '{0}/bin/conda'.format(utils.home('apps', 'miniconda'))\n\n run('{conda} create 
--name {name} python={python} --yes'.format(\n name=name,\n conda=conda,\n python=python))", "def get_environment(self):\n return self._environment", "def environment(self):\n return self._get_field(\"environment\")", "def prepConda(commands_list, envName = envName):\n commands_list.append('module load conda2')\n commands_list.append('source deactivate') # Removes any pre-existing conda environments\n commands_list.append('source activate {eName}'.format(eName = envName))" ]
[ "0.6355434", "0.6241017", "0.61965597", "0.61439174", "0.5763095", "0.57486796", "0.5724221", "0.5691034", "0.5655165", "0.5654611", "0.56079906", "0.55675125", "0.55185765", "0.5517381", "0.5498466", "0.5476292", "0.5472681", "0.5404192", "0.5397453", "0.53926975", "0.53794986", "0.5374743", "0.5373506", "0.53675205", "0.5347714", "0.5341046", "0.5331473", "0.5331405", "0.53286153", "0.53047717" ]
0.80523634
0
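The environment returned by get_default_conda_env above is the same object that the flavor's logging entry point accepts through its conda_env argument (see the log_model row that follows). A minimal end-to-end sketch, assuming mlflow and sktime are installed and a default MLflow tracking backend is reachable; NaiveForecaster and load_airline are standard sktime names used here only as stand-ins for any fitted estimator.

    # Minimal sketch: log a fitted sktime forecaster with the flavor's default Conda env.
    import mlflow
    from sktime.datasets import load_airline
    from sktime.forecasting.naive import NaiveForecaster
    from sktime.utils import mlflow_sktime

    y = load_airline()
    forecaster = NaiveForecaster(strategy="mean").fit(y)

    conda_env = mlflow_sktime.get_default_conda_env()  # dict built from the default pip requirements
    with mlflow.start_run():
        mlflow_sktime.log_model(
            sktime_model=forecaster,
            artifact_path="model",
            conda_env=conda_env,
        )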
Log a sktime model as an MLflow artifact for the current run.
def log_model( sktime_model, artifact_path, conda_env=None, code_paths=None, registered_model_name=None, signature=None, input_example=None, await_registration_for=None, pip_requirements=None, extra_pip_requirements=None, serialization_format=SERIALIZATION_FORMAT_PICKLE, **kwargs, ): # TODO: can we specify a type for fitted instance of sktime model below? # noqa: E501 _check_soft_dependencies("mlflow", severity="error") from mlflow.models import Model if await_registration_for is None: from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS await_registration_for = DEFAULT_AWAIT_MAX_SLEEP_SECONDS return Model.log( artifact_path=artifact_path, flavor=utils.mlflow_sktime, registered_model_name=registered_model_name, sktime_model=sktime_model, conda_env=conda_env, code_paths=code_paths, signature=signature, input_example=input_example, await_registration_for=await_registration_for, pip_requirements=pip_requirements, extra_pip_requirements=extra_pip_requirements, serialization_format=serialization_format, **kwargs, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def test_log_model(auto_arima_model, tmp_path, should_start_run, serialization_format):\n try:\n if should_start_run:\n mlflow.start_run()\n artifact_path = \"sktime\"\n conda_env = tmp_path.joinpath(\"conda_env.yaml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"sktime\"])\n model_info = flavor.log_model(\n sktime_model=auto_arima_model,\n artifact_path=artifact_path,\n conda_env=str(conda_env),\n serialization_format=serialization_format,\n )\n model_uri = f\"runs:/{mlflow.active_run().info.run_id}/{artifact_path}\"\n assert model_info.model_uri == model_uri\n reloaded_model = flavor.load_model(\n model_uri=model_uri,\n )\n np.testing.assert_array_equal(auto_arima_model.predict(), reloaded_model.predict())\n model_path = Path(_download_artifact_from_uri(artifact_uri=model_uri))\n model_config = Model.load(str(model_path.joinpath(\"MLmodel\")))\n assert pyfunc.FLAVOR_NAME in model_config.flavors\n finally:\n mlflow.end_run()", "def log_model(self, model_name=\"fixmatch_model\"):\n \n assert hasattr(self, \"_mlflow\"), \"need to run track_with_mlflow() first\"\n from mlflow.keras import log_model\n log_model(self._models[\"full\"], model_name)", "def test_log_model_calls_register_model(auto_arima_model, tmp_path):\n artifact_path = \"sktime\"\n register_model_patch = mock.patch(\"mlflow.register_model\")\n with mlflow.start_run(), register_model_patch:\n conda_env = tmp_path.joinpath(\"conda_env.yaml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"sktime\"])\n flavor.log_model(\n sktime_model=auto_arima_model,\n artifact_path=artifact_path,\n conda_env=str(conda_env),\n registered_model_name=\"SktimeModel\",\n )\n model_uri = f\"runs:/{mlflow.active_run().info.run_id}/{artifact_path}\"\n mlflow.register_model.assert_called_once_with(\n model_uri,\n \"SktimeModel\",\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n )", "def log_model_artifact(self,\n path: str,\n epoch: int = None,\n scores: float or dict = None,\n opt: argparse.Namespace = None, ):\n if self.use_wandb:\n self.wandb.log_model(path, epoch, scores, opt)\n else:\n self.log_message(\"Does not support upload dataset artifact to Weight & Biases.\")", "def log_model(artifact_path, **kwargs):\n with TempDir() as tmp:\n local_path = tmp.path(artifact_path)\n run_id = active_run().info.run_uuid\n if 'model' in kwargs:\n raise Exception(\"Unused argument 'model'. 
log_model creates a new model object\")\n\n save_model(dst_path=local_path, model=Model(artifact_path=artifact_path, run_id=run_id),\n **kwargs)\n log_artifacts(local_path, artifact_path)", "def run(\n trained_model: Ridge,\n mlflow: mlflow,\n model_name: str = \"diabetes\",\n app_logger: AppLogger = get_disabled_logger(),\n parent_tracer: Tracer = None,\n) -> ModelVersion:\n logger = logging.getLogger(__name__)\n try:\n component_name = \"Diabetes_Publish_Model\"\n\n # mlflow tracking\n mlflow_run = mlflow.active_run()\n mlflow_run_id = mlflow_run.info.run_id\n mlflow_experiment_id = mlflow_run.info.experiment_id\n\n logger = app_logger.get_logger(\n component_name=component_name,\n custom_dimensions={\n \"mlflow_run_id\": mlflow_run_id,\n \"mlflow_experiment_id\": mlflow_experiment_id,\n },\n )\n tracer = app_logger.get_tracer(\n component_name=component_name, parent_tracer=parent_tracer\n )\n\n logger.info(\"Running MLOps publish model\")\n\n temp_model_dir = tempfile.mkdtemp()\n model_path = os.path.join(temp_model_dir, model_name)\n with tracer.span(\"save_model\"):\n mlflow.sklearn.save_model(trained_model, model_path)\n mlflow.log_artifact(model_path)\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=model_name\n )\n\n logger.info(\"Publishing trained model into mlflow model registry\")\n with tracer.span(\"register_model\"):\n model_details = mlflow.register_model(model_uri=model_uri, name=model_name)\n model_version = model_details.version\n\n mlflow.log_param(\"model_version\", model_version)\n mlflow.log_param(\"model_name\", model_name)\n\n logger.info(f\"published model name: {model_name}, version: {model_version}\")\n logger.info(\"Completed MLOps publish model\")\n\n return model_details\n except Exception as exp:\n logger.error(\"an exception occurred in publish model\")\n raise Exception(\"an exception occurred in publish model\") from exp", "def test_log_model_no_registered_model_name(auto_arima_model, tmp_path):\n artifact_path = \"sktime\"\n register_model_patch = mock.patch(\"mlflow.register_model\")\n with mlflow.start_run(), register_model_patch:\n conda_env = tmp_path.joinpath(\"conda_env.yaml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"sktime\"])\n flavor.log_model(\n sktime_model=auto_arima_model,\n artifact_path=artifact_path,\n conda_env=str(conda_env),\n )\n mlflow.register_model.assert_not_called()", "def autolog(\n every_n_iter=1,\n log_models=True,\n disable=False,\n exclusive=False,\n disable_for_unsupported_versions=False,\n silent=False,\n): # pylint: disable=unused-argument\n # pylint: disable=E0611\n import tensorflow\n\n global _LOG_EVERY_N_STEPS\n _LOG_EVERY_N_STEPS = every_n_iter\n\n atexit.register(_flush_queue)\n\n if Version(tensorflow.__version__) < Version(\"1.12\"):\n warnings.warn(\"Could not log to MLflow. TensorFlow versions below 1.12 are not supported.\")\n return\n\n try:\n from tensorflow.python.summary.writer.event_file_writer import EventFileWriter\n from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2\n from tensorflow.python.saved_model import tag_constants\n from tensorflow.python.summary.writer.writer import FileWriter\n except ImportError:\n warnings.warn(\"Could not log to MLflow. 
TensorFlow versions below 1.12 are not supported.\")\n return\n\n def train(original, self, *args, **kwargs):\n active_run = mlflow.active_run()\n global _AUTOLOG_RUN_ID\n _AUTOLOG_RUN_ID = active_run.info.run_id\n\n # Checking step and max_step parameters for logging\n if len(args) >= 3:\n mlflow.log_param(\"steps\", args[2])\n if len(args) >= 4:\n mlflow.log_param(\"max_steps\", args[3])\n if \"steps\" in kwargs:\n mlflow.log_param(\"steps\", kwargs[\"steps\"])\n if \"max_steps\" in kwargs:\n mlflow.log_param(\"max_steps\", kwargs[\"max_steps\"])\n\n result = original(self, *args, **kwargs)\n\n # Flush the metrics queue after training completes\n _flush_queue()\n\n # Log Tensorboard event files as artifacts\n if os.path.exists(self.model_dir):\n for file in os.listdir(self.model_dir):\n if \"tfevents\" not in file:\n continue\n mlflow.log_artifact(\n local_path=os.path.join(self.model_dir, file),\n artifact_path=\"tensorboard_logs\",\n )\n return result\n\n def export_saved_model(original, self, *args, **kwargs):\n global _AUTOLOG_RUN_ID\n if _AUTOLOG_RUN_ID:\n _logger.info(\n \"Logging TensorFlow Estimator as MLflow Model to run with ID '%s'\", _AUTOLOG_RUN_ID\n )\n\n serialized = original(self, *args, **kwargs)\n\n def log_model_without_starting_new_run():\n \"\"\"\n Performs the exact same operations as `log_model` without starting a new run\n \"\"\"\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )\n\n log_model_without_starting_new_run()\n\n _AUTOLOG_RUN_ID = None\n\n return serialized\n\n @picklable_exception_safe_function\n def _get_early_stop_callback(callbacks):\n for callback in callbacks:\n if isinstance(callback, tensorflow.keras.callbacks.EarlyStopping):\n return callback\n return None\n\n def _log_early_stop_callback_params(callback):\n if callback:\n try:\n earlystopping_params = {\n \"monitor\": callback.monitor,\n \"min_delta\": callback.min_delta,\n \"patience\": callback.patience,\n \"baseline\": callback.baseline,\n \"restore_best_weights\": callback.restore_best_weights,\n }\n mlflow.log_params(earlystopping_params)\n except Exception: # pylint: disable=W0703\n return\n\n def _get_early_stop_callback_attrs(callback):\n try:\n return callback.stopped_epoch, callback.restore_best_weights, callback.patience\n except Exception: # pylint: disable=W0703\n return None\n\n def _log_early_stop_callback_metrics(callback, history, metrics_logger):\n if callback is None or not callback.model.stop_training:\n return\n\n callback_attrs = _get_early_stop_callback_attrs(callback)\n if callback_attrs is None:\n return\n\n stopped_epoch, restore_best_weights, _ = callback_attrs\n metrics_logger.record_metrics({\"stopped_epoch\": stopped_epoch})\n\n if not restore_best_weights or callback.best_weights is None:\n return\n\n monitored_metric = history.history.get(callback.monitor)\n if not monitored_metric:\n return\n\n initial_epoch = history.epoch[0]\n # If `monitored_metric` contains multiple best values (e.g. [0.1, 0.1, 0.2] where 0.1 is\n # the minimum loss), the epoch corresponding to the first occurrence of the best value is\n # the best epoch. In keras > 2.6.0, the best epoch can be obtained via the `best_epoch`\n # attribute of an `EarlyStopping` instance: https://github.com/keras-team/keras/pull/15197\n restored_epoch = initial_epoch + monitored_metric.index(callback.best)\n metrics_logger.record_metrics({\"restored_epoch\": restored_epoch})\n restored_index = history.epoch.index(restored_epoch)\n restored_metrics = {\n key: metrics[restored_index] for key, metrics in history.history.items()\n }\n # Checking that a metric history exists\n metric_key = next(iter(history.history), None)\n if metric_key is not None:\n metrics_logger.record_metrics(restored_metrics, stopped_epoch + 1)\n\n class FitPatch(PatchFunction):\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"x\", \"y\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n early_stop_callback = None\n\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 6:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[5])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[5] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n early_stop_callback = _get_early_stop_callback(callbacks)\n _log_early_stop_callback_params(early_stop_callback)\n\n history = original(inst, *args, **kwargs)\n\n _log_early_stop_callback_metrics(\n callback=early_stop_callback,\n history=history,\n metrics_logger=metrics_logger,\n )\n\n _flush_queue()\n mlflow.log_artifacts(\n local_dir=self.log_dir.location,\n artifact_path=\"tensorboard_logs\",\n )\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return history\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n class FitGeneratorPatch(PatchFunction):\n \"\"\"\n NOTE: `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n To avoid unintentional creation of nested MLflow runs caused by a patched\n `fit_generator()` method calling a patched `fit()` method, we only patch\n `fit_generator()` in TF < 2.1.0.\n \"\"\"\n\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"generator\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n\n run_id = mlflow.active_run().info.run_id\n\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 5:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[4])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[4] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n result = original(inst, *args, **kwargs)\n\n _flush_queue()\n mlflow.log_artifacts(local_dir=self.log_dir.location, artifact_path=\"tensorboard_logs\")\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return result\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n def add_event(original, self, event):\n _log_event(event)\n return original(self, event)\n\n def add_summary(original, self, *args, **kwargs):\n result = original(self, *args, **kwargs)\n _flush_queue()\n return result\n\n managed = [\n (tensorflow.estimator.Estimator, \"train\", train),\n (tensorflow.keras.Model, \"fit\", FitPatch),\n ]\n\n if Version(tensorflow.__version__) < Version(\"2.1.0\"):\n # `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n # To avoid unintentional creation of nested MLflow runs caused by a patched\n # `fit_generator()` method calling a patched `fit()` method, we only patch\n # `fit_generator()` in TF < 2.1.0\n managed.append((tensorflow.keras.Model, \"fit_generator\", FitGeneratorPatch))\n\n non_managed = [\n (EventFileWriter, \"add_event\", add_event),\n (EventFileWriterV2, \"add_event\", add_event),\n (FileWriter, \"add_summary\", add_summary),\n (tensorflow.estimator.Estimator, \"export_saved_model\", export_saved_model),\n (tensorflow.estimator.Estimator, \"export_savedmodel\", export_saved_model),\n ]\n\n # Add compat.v1 Estimator patching for versions of tensfor that are 2.0+.\n if Version(tensorflow.__version__) >= Version(\"2.0.0\"):\n old_estimator_class = tensorflow.compat.v1.estimator.Estimator\n v1_train = (old_estimator_class, \"train\", train)\n v1_export_saved_model = (old_estimator_class, \"export_saved_model\", export_saved_model)\n v1_export_savedmodel = (old_estimator_class, \"export_savedmodel\", export_saved_model)\n\n managed.append(v1_train)\n non_managed.append(v1_export_saved_model)\n non_managed.append(v1_export_savedmodel)\n\n for p in managed:\n safe_patch(FLAVOR_NAME, *p, manage_run=True)\n\n for p in non_managed:\n safe_patch(FLAVOR_NAME, *p)", "def train_logger(model_fn):\n\n @functools.wraps(model_fn)\n def wrapper(*args, **kwargs):\n timer_start = time.perf_counter()\n model = model_fn(*args, **kwargs)\n timer_end = time.perf_counter()\n\n time_stamp = time.localtime()\n model_version = MODEL_VERSION\n run_time = timer_end - timer_start\n\n log_entry = [time_stamp, MODEL_VERSION, run_time]\n\n header = \\\n ','.join(TRAIN_HEADER) \\\n if os.path.exists(CWD, LOG_PATH, LOG_FILE.format(LOG_TYPES['T'])) \\\n else False\n\n create_or_update_log(LOG_TYPES['T'], log_entry, header)\n\n print('Logging: train logger')\n\n return model\n return wrapper", "def log_artifact(self, filename=''):\n return os.path.join(os.sep, 'opt', 'ml', 'model', 
filename)", "def log_model(\n tf_saved_model_dir,\n tf_meta_graph_tags,\n tf_signature_def_key,\n artifact_path,\n conda_env=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n registered_model_name=None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n pip_requirements=None,\n extra_pip_requirements=None,\n):\n return Model.log(\n artifact_path=artifact_path,\n flavor=mlflow.tensorflow,\n tf_saved_model_dir=tf_saved_model_dir,\n tf_meta_graph_tags=tf_meta_graph_tags,\n tf_signature_def_key=tf_signature_def_key,\n conda_env=conda_env,\n registered_model_name=registered_model_name,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n )", "def logging_init(model, graph):\n # Add ops to record summaries for loss and accuracy...\n train_loss = tf.summary.scalar(\"train_loss\", model.loss)\n train_accuracy = tf.summary.scalar(\"train_accuracy\", model.accuracy)\n # ...then merge these ops into one single op so that they easily be run\n # together\n train_summary_ops = tf.summary.merge([train_loss, train_accuracy])\n # Same ops, but with different names, so that train/test results show up\n # separately in TensorBoard\n test_loss = tf.summary.scalar(\"test_loss\", model.loss)\n test_accuracy = tf.summary.scalar(\"test_accuracy\", model.accuracy)\n test_summary_ops = tf.summary.merge([test_loss, test_accuracy])\n\n timestamp = int(time.time())\n run_log_dir = os.path.join(LOGS_DIR, str(timestamp))\n os.makedirs(run_log_dir)\n # (this step also writes the graph to the events file so that\n # it shows up in TensorBoard)\n summary_writer = tf.summary.FileWriter(run_log_dir, graph)\n\n return train_summary_ops, test_summary_ops, summary_writer", "def save_model(\n sktime_model,\n path,\n conda_env=None,\n code_paths=None,\n mlflow_model=None,\n signature=None,\n input_example=None,\n pip_requirements=None,\n extra_pip_requirements=None,\n serialization_format=SERIALIZATION_FORMAT_PICKLE,\n): # TODO: can we specify a type for fitted instance of sktime model below? # noqa: E501\n _check_soft_dependencies(\"mlflow\", severity=\"error\")\n from mlflow.exceptions import MlflowException\n from mlflow.models import Model\n from mlflow.models.model import MLMODEL_FILE_NAME\n from mlflow.models.utils import _save_example\n from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\n from mlflow.utils.environment import (\n _CONDA_ENV_FILE_NAME,\n _CONSTRAINTS_FILE_NAME,\n _PYTHON_ENV_FILE_NAME,\n _REQUIREMENTS_FILE_NAME,\n _process_conda_env,\n _process_pip_requirements,\n _PythonEnv,\n _validate_env_arguments,\n )\n from mlflow.utils.file_utils import write_to\n from mlflow.utils.model_utils import (\n _validate_and_copy_code_paths,\n _validate_and_prepare_target_save_path,\n )\n\n _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)\n\n if serialization_format not in SUPPORTED_SERIALIZATION_FORMATS:\n raise MlflowException(\n message=(\n \"Unrecognized serialization format: {serialization_format}. 
\"\n \"Please specify one of the following supported formats: \"\n \"{supported_formats}.\".format(\n serialization_format=serialization_format,\n supported_formats=SUPPORTED_SERIALIZATION_FORMATS,\n )\n ),\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n _validate_and_prepare_target_save_path(path)\n code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)\n\n if mlflow_model is None:\n mlflow_model = Model()\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n model_data_subpath = \"model.pkl\"\n model_data_path = os.path.join(path, model_data_subpath)\n _save_model(\n sktime_model, model_data_path, serialization_format=serialization_format\n )\n\n pyfunc.add_to_model(\n mlflow_model,\n loader_module=\"sktime.utils.mlflow_sktime\",\n model_path=model_data_subpath,\n conda_env=_CONDA_ENV_FILE_NAME,\n python_env=_PYTHON_ENV_FILE_NAME,\n code=code_dir_subpath,\n )\n\n mlflow_model.add_flavor(\n FLAVOR_NAME,\n pickled_model=model_data_subpath,\n sktime_version=sktime.__version__,\n serialization_format=serialization_format,\n code=code_dir_subpath,\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n if conda_env is None:\n if pip_requirements is None:\n include_cloudpickle = (\n serialization_format == SERIALIZATION_FORMAT_CLOUDPICKLE\n )\n default_reqs = get_default_pip_requirements(include_cloudpickle)\n default_reqs = sorted(default_reqs)\n else:\n default_reqs = None\n conda_env, pip_requirements, pip_constraints = _process_pip_requirements(\n default_reqs, pip_requirements, extra_pip_requirements\n )\n else:\n conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)\n\n with open(os.path.join(path, _CONDA_ENV_FILE_NAME), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n if pip_constraints:\n write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), \"\\n\".join(pip_constraints))\n\n write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), \"\\n\".join(pip_requirements))\n\n _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))", "def logWorkflowStep(self, pid, desc):\n\n self.dbase.logStep(pid, desc, self.scene)\n return", "def train():\n import trace\n trace.train()", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def save_model_trace(output_path: str, model, trace):\n with open(output_path, \"wb\") as buff:\n pickle.dump({\"model\": model, \"trace\": trace}, buff)", "def write_model_to_tensorboard(self, *args, **kwargs):\n pass", "def _visualise_model(\n ax: plt.Axes, model: BaseModel, ts: ArrayLike, model_color: str, growth_period_color: str, maturation_offset: float\n) -> None:\n # Visualise the fit\n ts = np.asarray(ts)\n ys = model.predict(ts)\n ax.plot(ts, ys, c=model_color, label=\"Predicted\")\n\n # Visualise the time of maximal activity and the 
growth period\n core.mark_phase(\n ax,\n point=model.time_maximal_activity + maturation_offset,\n interval=model.growth_period + maturation_offset,\n color=growth_period_color,\n )", "def log_train_step(self, train_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(train_log, step=step)", "def run_model_pipeline_for_trace(self, trace, tuning=True):\n pass", "def log_test_step(self, test_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(test_log, step=self.e)", "def fsl_run_level_wf(\n model,\n step,\n bids_dir,\n output_dir,\n work_dir,\n subject_id,\n database_path,\n smoothing_fwhm=None,\n smoothing_level=None,\n smoothing_type=None,\n use_rapidart=False,\n detrend_poly=None,\n align_volumes=None,\n smooth_autocorrelations=False,\n despike=False,\n name=\"fsl_run_level_wf\",\n):\n bids_dir = Path(bids_dir)\n work_dir = Path(work_dir)\n workflow = pe.Workflow(name=name)\n\n level = step[\"Level\"]\n\n dimensionality = 3 # Nipype FSL.SUSAN Default\n if smoothing_type == \"inp\":\n dimensionality = 2\n\n workflow.__desc__ = \"\"\n (work_dir / model[\"Name\"]).mkdir(exist_ok=True)\n\n include_entities = {}\n if \"Input\" in model:\n if \"Include\" in model[\"Input\"]:\n include_entities = model[\"Input\"][\"Include\"]\n include_entities.update({\"subject\": subject_id})\n\n getter = pe.Node(\n BIDSGet(\n database_path=database_path,\n fixed_entities=include_entities,\n align_volumes=align_volumes,\n ),\n name=\"func_select\",\n )\n\n get_info = pe.MapNode(\n GetRunModelInfo(model=step, detrend_poly=detrend_poly),\n iterfield=[\"metadata_file\", \"regressor_file\", \"events_file\", \"entities\"],\n name=f\"get_{level}_info\",\n )\n\n despiker = pe.MapNode(\n afni.Despike(outputtype=\"NIFTI_GZ\"), iterfield=[\"in_file\"], name=\"despiker\",\n )\n\n realign_runs = pe.MapNode(\n fsl.MCFLIRT(output_type=\"NIFTI_GZ\", interpolation=\"sinc\"),\n iterfield=[\"in_file\", \"ref_file\"],\n name=\"func_realign\",\n )\n\n wrangle_volumes = pe.MapNode(\n IdentityInterface(fields=[\"functional_file\"]),\n iterfield=[\"functional_file\"],\n name=\"wrangle_volumes\",\n )\n\n specify_model = pe.MapNode(\n modelgen.SpecifyModel(high_pass_filter_cutoff=-1.0, input_units=\"secs\"),\n iterfield=[\"functional_runs\", \"subject_info\", \"time_repetition\"],\n name=f\"model_{level}_specify\",\n )\n\n fit_model = pe.MapNode(\n IdentityInterface(\n fields=[\"session_info\", \"interscan_interval\", \"contrasts\", \"functional_data\"],\n mandatory_inputs=True,\n ),\n iterfield=[\"functional_data\", \"session_info\", \"interscan_interval\", \"contrasts\"],\n name=f\"model_{level}_fit\",\n )\n\n first_level_design = pe.MapNode(\n fsl.Level1Design(bases={\"dgamma\": {\"derivs\": False}}, model_serial_correlations=False,),\n iterfield=[\"session_info\", \"interscan_interval\", \"contrasts\"],\n name=f\"model_{level}_design\",\n )\n\n generate_model = pe.MapNode(\n fsl.FEATModel(output_type=\"NIFTI_GZ\"),\n iterfield=[\"fsf_file\", \"ev_files\"],\n name=f\"model_{level}_generate\",\n )\n\n estimate_model = pe.MapNode(\n fsl.FILMGLS(\n threshold=0.0, # smooth_autocorr=True\n output_type=\"NIFTI_GZ\",\n results_dir=\"results\",\n smooth_autocorr=False,\n autocorr_noestimate=True,\n ),\n iterfield=[\"design_file\", \"in_file\", \"tcon_file\"],\n name=f\"model_{level}_estimate\",\n )\n\n if smooth_autocorrelations:\n first_level_design.inputs.model_serial_correlations = True\n estimate_model.inputs.smooth_autocorr = True\n 
estimate_model.inputs.autocorr_noestimate = False\n\n calculate_p = pe.MapNode(\n fsl.ImageMaths(output_type=\"NIFTI_GZ\", op_string=\"-ztop\", suffix=\"_pval\"),\n iterfield=[\"in_file\"],\n name=f\"model_{level}_caculate_p\",\n )\n\n image_pattern = (\n \"[sub-{subject}/][ses-{session}/]\"\n \"[sub-{subject}_][ses-{session}_]\"\n \"task-{task}_[acq-{acquisition}_]\"\n \"[rec-{reconstruction}_][run-{run}_]\"\n \"[echo-{echo}_][space-{space}_]contrast-{contrast}_\"\n \"stat-{stat<effect|variance|z|p|t|F>}_statmap.nii.gz\"\n )\n\n run_rapidart = pe.MapNode(\n ra.ArtifactDetect(\n use_differences=[True, False],\n use_norm=True,\n zintensity_threshold=3,\n norm_threshold=1,\n bound_by_brainmask=True,\n mask_type=\"file\",\n parameter_source=\"FSL\",\n ),\n iterfield=[\"realignment_parameters\", \"realigned_files\", \"mask_file\"],\n name=\"rapidart_run\",\n )\n\n reshape_rapidart = pe.MapNode(\n Function(\n input_names=[\"run_info\", \"functional_file\", \"outlier_file\", \"contrast_entities\"],\n output_names=[\"run_info\", \"contrast_entities\"],\n function=utils.reshape_ra,\n ),\n iterfield=[\"run_info\", \"functional_file\", \"outlier_file\", \"contrast_entities\"],\n name=\"reshape_rapidart\",\n )\n\n mean_img = pe.MapNode(\n fsl.ImageMaths(output_type=\"NIFTI_GZ\", op_string=\"-Tmean\", suffix=\"_mean\"),\n iterfield=[\"in_file\", \"mask_file\"],\n name=\"smooth_susan_avgimg\",\n )\n\n median_img = pe.MapNode(\n fsl.ImageStats(output_type=\"NIFTI_GZ\", op_string=\"-k %s -p 50\"),\n iterfield=[\"in_file\", \"mask_file\"],\n name=\"smooth_susan_medimg\",\n )\n\n merge = pe.Node(Merge(2, axis=\"hstack\"), name=\"smooth_merge\")\n\n run_susan = pe.MapNode(\n fsl.SUSAN(output_type=\"NIFTI_GZ\"),\n iterfield=[\"in_file\", \"brightness_threshold\", \"usans\"],\n name=\"smooth_susan\",\n )\n\n mask_functional = pe.MapNode(\n ApplyMask(), iterfield=[\"in_file\", \"mask_file\"], name=\"mask_functional\"\n )\n\n # Exists solely to correct undesirable behavior of FSL\n # that results in loss of constant columns\n correct_matrices = pe.MapNode(\n Function(\n input_names=[\"design_matrix\"],\n output_names=[\"design_matrix\"],\n function=utils.correct_matrix,\n ),\n iterfield=[\"design_matrix\"],\n run_without_submitting=True,\n name=f\"correct_{level}_matrices\",\n )\n\n collate = pe.Node(\n MergeAll(\n fields=[\n \"effect_maps\",\n \"variance_maps\",\n \"zscore_maps\",\n \"pvalue_maps\",\n \"tstat_maps\",\n \"contrast_metadata\",\n ],\n check_lengths=True,\n ),\n name=f\"collate_{level}\",\n )\n\n collate_outputs = pe.Node(\n CollateWithMetadata(\n fields=[\"effect_maps\", \"variance_maps\", \"zscore_maps\", \"pvalue_maps\", \"tstat_maps\"],\n field_to_metadata_map={\n \"effect_maps\": {\"stat\": \"effect\"},\n \"variance_maps\": {\"stat\": \"variance\"},\n \"zscore_maps\": {\"stat\": \"z\"},\n \"pvalue_maps\": {\"stat\": \"p\"},\n \"tstat_maps\": {\"stat\": \"t\"},\n },\n ),\n name=f\"collate_{level}_outputs\",\n )\n\n plot_matrices = pe.MapNode(\n PlotMatrices(output_dir=output_dir, database_path=database_path),\n iterfield=[\"mat_file\", \"con_file\", \"entities\", \"run_info\"],\n run_without_submitting=True,\n name=f\"plot_{level}_matrices\",\n )\n\n ds_contrast_maps = pe.MapNode(\n BIDSDataSink(base_directory=output_dir, path_patterns=image_pattern),\n iterfield=[\"entities\", \"in_file\"],\n run_without_submitting=True,\n name=f\"ds_{level}_contrast_maps\",\n )\n\n wrangle_outputs = pe.Node(\n IdentityInterface(fields=[\"contrast_metadata\", \"contrast_maps\"]),\n 
name=f\"wrangle_{level}_outputs\",\n )\n\n # Setup connections among nodes\n workflow.connect(\n [\n (\n getter,\n get_info,\n [\n (\"metadata_files\", \"metadata_file\"),\n (\"events_files\", \"events_file\"),\n (\"regressor_files\", \"regressor_file\"),\n (\"entities\", \"entities\"),\n ],\n )\n ]\n )\n\n if align_volumes and despike:\n workflow.connect(\n [\n (getter, despiker, [(\"functional_files\", \"in_file\")]),\n (despiker, realign_runs, [(\"out_file\", \"in_file\")]),\n (getter, realign_runs, [(\"reference_files\", \"ref_file\")]),\n (realign_runs, wrangle_volumes, [(\"out_file\", \"functional_file\")],),\n ]\n )\n elif align_volumes and not despike:\n workflow.connect(\n [\n (\n getter,\n realign_runs,\n [(\"functional_files\", \"in_file\"), (\"reference_files\", \"ref_file\")],\n ),\n (realign_runs, wrangle_volumes, [(\"out_file\", \"functional_file\")],),\n ]\n )\n elif despike:\n workflow.connect(\n [\n (getter, despiker, [(\"functional_files\", \"in_file\")]),\n (despiker, wrangle_volumes, [(\"out_file\", \"functional_file\")]),\n ]\n )\n else:\n workflow.connect([(getter, wrangle_volumes, [(\"functional_files\", \"functional_file\")])])\n\n if use_rapidart:\n workflow.connect(\n [\n (get_info, run_rapidart, [(\"motion_parameters\", \"realignment_parameters\")]),\n (getter, run_rapidart, [(\"mask_files\", \"mask_file\")]),\n (wrangle_volumes, run_rapidart, [(\"functional_file\", \"realigned_files\")],),\n (run_rapidart, reshape_rapidart, [(\"outlier_files\", \"outlier_file\")],),\n (\n get_info,\n reshape_rapidart,\n [(\"run_info\", \"run_info\"), (\"contrast_entities\", \"contrast_entities\")],\n ),\n (wrangle_volumes, reshape_rapidart, [(\"functional_file\", \"functional_file\")]),\n (reshape_rapidart, specify_model, [(\"run_info\", \"subject_info\")],),\n (reshape_rapidart, plot_matrices, [(\"run_info\", \"run_info\")]),\n (reshape_rapidart, collate, [(\"contrast_entities\", \"contrast_metadata\")]),\n ]\n )\n else:\n workflow.connect(\n [\n (get_info, specify_model, [(\"run_info\", \"subject_info\")]),\n (get_info, plot_matrices, [(\"run_info\", \"run_info\")]),\n (get_info, collate, [(\"contrast_entities\", \"contrast_metadata\")],),\n ]\n )\n\n if smoothing_level == \"l1\" or smoothing_level == \"run\":\n run_susan.inputs.fwhm = smoothing_fwhm\n run_susan.inputs.dimension = dimensionality\n estimate_model.inputs.mask_size = smoothing_fwhm\n workflow.connect(\n [\n (wrangle_volumes, mean_img, [(\"functional_file\", \"in_file\")]),\n (wrangle_volumes, median_img, [(\"functional_file\", \"in_file\")],),\n (getter, mean_img, [(\"mask_files\", \"mask_file\")]),\n (getter, median_img, [(\"mask_files\", \"mask_file\")]),\n (mean_img, merge, [(\"out_file\", \"in1\")]),\n (median_img, merge, [(\"out_stat\", \"in2\")]),\n (wrangle_volumes, run_susan, [(\"functional_file\", \"in_file\")]),\n (\n median_img,\n run_susan,\n [((\"out_stat\", utils.get_btthresh), \"brightness_threshold\",)],\n ),\n (merge, run_susan, [((\"out\", utils.get_usans), \"usans\")]),\n (getter, mask_functional, [(\"mask_files\", \"mask_file\")]),\n (run_susan, mask_functional, [(\"smoothed_file\", \"in_file\")]),\n (mask_functional, specify_model, [(\"out_file\", \"functional_runs\")],),\n (mask_functional, fit_model, [(\"out_file\", \"functional_data\")],),\n ]\n )\n\n else:\n workflow.connect(\n [\n (getter, mask_functional, [(\"mask_files\", \"mask_file\")]),\n (wrangle_volumes, mask_functional, [(\"functional_file\", \"in_file\")],),\n (mask_functional, specify_model, [(\"out_file\", 
\"functional_runs\")],),\n (mask_functional, fit_model, [(\"out_file\", \"functional_data\")],),\n ]\n )\n\n workflow.connect(\n [\n (get_info, specify_model, [(\"repetition_time\", \"time_repetition\")],),\n (specify_model, fit_model, [(\"session_info\", \"session_info\")]),\n (\n get_info,\n fit_model,\n [(\"repetition_time\", \"interscan_interval\"), (\"run_contrasts\", \"contrasts\")],\n ),\n (\n fit_model,\n first_level_design,\n [\n (\"interscan_interval\", \"interscan_interval\"),\n (\"session_info\", \"session_info\"),\n (\"contrasts\", \"contrasts\"),\n ],\n ),\n (first_level_design, generate_model, [(\"fsf_files\", \"fsf_file\")]),\n (first_level_design, generate_model, [(\"ev_files\", \"ev_files\")]),\n ]\n )\n\n if detrend_poly:\n workflow.connect(\n [\n (generate_model, correct_matrices, [(\"design_file\", \"design_matrix\")],),\n (correct_matrices, plot_matrices, [(\"design_matrix\", \"mat_file\")],),\n (correct_matrices, estimate_model, [(\"design_matrix\", \"design_file\")],),\n ]\n )\n\n else:\n workflow.connect(\n [\n (generate_model, plot_matrices, [(\"design_file\", \"mat_file\")]),\n (generate_model, estimate_model, [(\"design_file\", \"design_file\")],),\n ]\n )\n\n workflow.connect(\n [\n (getter, plot_matrices, [(\"entities\", \"entities\")]),\n (generate_model, plot_matrices, [(\"con_file\", \"con_file\")]),\n (fit_model, estimate_model, [(\"functional_data\", \"in_file\")]),\n (generate_model, estimate_model, [(\"con_file\", \"tcon_file\")]),\n (estimate_model, calculate_p, [((\"zstats\", utils.flatten), \"in_file\")],),\n (\n estimate_model,\n collate,\n [\n (\"copes\", \"effect_maps\"),\n (\"varcopes\", \"variance_maps\"),\n (\"zstats\", \"zscore_maps\"),\n (\"tstats\", \"tstat_maps\"),\n ],\n ),\n (calculate_p, collate, [(\"out_file\", \"pvalue_maps\")]),\n (\n collate,\n collate_outputs,\n [\n (\"effect_maps\", \"effect_maps\"),\n (\"variance_maps\", \"variance_maps\"),\n (\"zscore_maps\", \"zscore_maps\"),\n (\"pvalue_maps\", \"pvalue_maps\"),\n (\"tstat_maps\", \"tstat_maps\"),\n (\"contrast_metadata\", \"metadata\"),\n ],\n ),\n (collate_outputs, ds_contrast_maps, [(\"out\", \"in_file\"), (\"metadata\", \"entities\")],),\n (\n collate_outputs,\n wrangle_outputs,\n [(\"metadata\", \"contrast_metadata\"), (\"out\", \"contrast_maps\")],\n ),\n ]\n )\n\n return workflow", "def on_train_begin(self, logs=None):\n f = open(self.log_file_path, \"a\")\n f.write(f\"{'=' * 5}{self.model_name}({self.hp_log_title}){'=' * 5}\\n\")\n f.close()", "def to_mlflow(\n self,\n tracking_uri: Optional[str] = None,\n experiment_id: Optional[int] = None,\n run_name: str = \"log_biometext_model\",\n input_example: Optional[Dict] = None,\n conda_env: Optional[Dict] = None,\n ) -> str:\n if tracking_uri:\n mlflow.set_tracking_uri(tracking_uri)\n\n # This conda environment is only needed when serving the model later on with `mlflow models serve`\n conda_env = conda_env or {\n \"name\": \"mlflow-dev\",\n \"channels\": [\"defaults\", \"conda-forge\"],\n \"dependencies\": [\n \"python=3.7.9\",\n \"pip>=20.3.0\",\n {\"pip\": [\"mlflow\", f\"biome-text=={__version__}\"]},\n ],\n }\n\n with tempfile.TemporaryDirectory() as tmpdir_name:\n file_path = Path(self.save(directory=tmpdir_name))\n\n with mlflow.start_run(\n experiment_id=experiment_id, run_name=run_name\n ) as run:\n mlflow.log_artifact(str(file_path), \"biometext_pipeline\")\n mlflow.pyfunc.log_model(\n artifact_path=\"mlflow_model\",\n python_model=BiomeTextModel(),\n artifacts={\n BiomeTextModel.ARTIFACT_CONTEXT: 
mlflow.get_artifact_uri(\n f\"biometext_pipeline/{file_path.name}\"\n )\n },\n input_example=input_example,\n conda_env=conda_env,\n )\n model_uri = os.path.join(run.info.artifact_uri, \"mlflow_model\")\n\n return model_uri", "def printModelAndTime(self):\n import time\n self._reporter.writeOutput(\"Model name = \" + self.modelName + '\\n' +\n \"Output directory = \" + self._outputDir_ + '\\n' +\n \"Time = \" + time.asctime() + '\\n')\n return", "def logging(self, function):\n avg_nms_time_per_step = sum(self.nms_times)/len(self.nms_times)\n avg_total_time_per_step = sum(self.total_times)/len(self.total_times)\n\n avg_min_latency = [x[0] for x in self.inference_times]\n avg_max_latency = [x[1] for x in self.inference_times]\n avg_latency = [x[2] for x in self.inference_times]\n\n function(\"Inference stats: image size {}x{}, batches per step {}, batch size {}, {} steps\".format(\n self.cfg.model.image_size, self.cfg.model.image_size, self.cfg.ipuopts.batches_per_step, self.cfg.model.micro_batch_size, len(self.total_times)\n ))\n function(\"--------------------------------------------------\")\n function(\"Inference\")\n function(\"Average Min Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_min_latency)/len(self.inference_times)))\n function(\"Average Max Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_max_latency)/len(self.inference_times)))\n function(\"Average Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_latency)/len(self.inference_times)))\n function(\"Average Inference Throughput: {:.3f} img/s\".format(sum(self.inference_throughputs)/len(self.inference_throughputs)))\n function(\"--------------------------------------------------\")\n # TODO remove the NMS and end-to-end time report once NMS is on device\n function(\"End-to-end\")\n function(\"Average NMS Latency per Batch: {:.3f} ms\".format(1000 * avg_nms_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"Average End-to-end Latency per Batch: {:.3f} ms\".format(1000 * avg_total_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"End-to-end Throughput: {:.3f} img/s\".format(sum(self.total_throughputs)/len(self.total_throughputs)))\n function(\"==================================================\")\n\n if self.cfg.eval.metrics:\n self.compute_and_print_eval_metrics()", "def print_log(*content):\n now = datetime.datetime.now().strftime(\"%y-%m-%d %H:%M:%S\")\n print(\"MODEL INFO: \" + str(now)+ \" \", end='')\n print(*content)", "def on_train_begin(self, logs=None):\n self.start_time = datetime.datetime.now()\n print(f\"Starting training at {self.start_time}\")" ]
[ "0.70850366", "0.69937974", "0.6949326", "0.67462784", "0.66128474", "0.65032214", "0.6481456", "0.64282435", "0.6287046", "0.59985286", "0.59353966", "0.59068996", "0.58943534", "0.57941216", "0.5646695", "0.5579601", "0.54835516", "0.54797125", "0.5476407", "0.54430944", "0.5427412", "0.54086936", "0.5406854", "0.5392879", "0.5391474", "0.53489065", "0.5325979", "0.53237236", "0.53123295", "0.5294181" ]
0.73166275
0
Load PyFunc implementation. Called by ``pyfunc.load_model``.
def _load_pyfunc(path): # noqa: E501 _check_soft_dependencies("mlflow", severity="error") from mlflow.exceptions import MlflowException from mlflow.utils.model_utils import _get_flavor_configuration if os.path.isfile(path): serialization_format = SERIALIZATION_FORMAT_PICKLE _logger.warning( "Loading procedure in older versions of MLflow using pickle.load()" ) else: try: sktime_flavor_conf = _get_flavor_configuration( model_path=path, flavor_name=FLAVOR_NAME ) serialization_format = sktime_flavor_conf.get( "serialization_format", SERIALIZATION_FORMAT_PICKLE ) except MlflowException: _logger.warning( "Could not find sktime flavor configuration during model " "loading process. Assuming 'pickle' serialization format." ) serialization_format = SERIALIZATION_FORMAT_PICKLE pyfunc_flavor_conf = _get_flavor_configuration( model_path=path, flavor_name=pyfunc.FLAVOR_NAME ) path = os.path.join(path, pyfunc_flavor_conf["model_path"]) return _SktimeModelWrapper( _load_model(path, serialization_format=serialization_format) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_pyfunc(path):\n import tensorflow\n\n (\n tf_saved_model_dir,\n tf_meta_graph_tags,\n tf_signature_def_key,\n ) = _get_and_parse_flavor_configuration(model_path=path)\n\n loaded_model = tensorflow.saved_model.load( # pylint: disable=no-value-for-parameter\n export_dir=tf_saved_model_dir, tags=tf_meta_graph_tags\n )\n return _TF2Wrapper(model=loaded_model, infer=loaded_model.signatures[tf_signature_def_key])", "def load_pyfunc(path, run_id=None, suppress_warnings=False):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf = _load_model_conf(path)\n model_py_version = conf.get(PY_VERSION)\n if not suppress_warnings:\n _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)\n if CODE in conf and conf[CODE]:\n code_path = os.path.join(path, conf[CODE])\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path\n return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def load(self, func):\n self._init = func.__name__", "def load_code(mfile, fname):\n mname = mfile.split('.py')[0].replace('/', '.')\n try:\n mod = __import__(mname, fromlist=['model'])\n func = getattr(mod, fname)\n print(\"load {} {} {}\".format(mfile, func, func.__doc__))\n return func\n except ImportError:\n traceback.print_exc()\n msg = \"Please provide file name with 'def %s' implementation\" % fname\n msg += \"\\nThe file should be available in PYTHONPATH\"\n print(msg)\n raise", "def load(self):\n code = self.get_code()\n determ = self.get_deterministic()\n if self.uniquify:\n self.uniquify_name()\n self.store.load_user_function(self.get_name(), self.get_num_params(), code, deterministic=determ)", "def load(self):\n self.uniquify_name()\n code = self.get_code()\n self.store.load_user_function(self.get_name(), self.get_num_params(), code)", "def __init__(\n self,\n module: Union[module_utils.CompiledModule, None],\n function: Union[Callable[[TracedModule], None], None],\n _load_dict: Optional[Dict[str, Any]] = None,\n ):\n if _load_dict is None:\n # Extract metadata from module and function.\n self.module_name = module.module_name\n self.compiled_paths = module.compiled_paths\n self.backend_name = module.backend_info.backend_name\n self.backend_id = module.backend_info.backend_id\n self.backend_driver = module.backend_info.driver\n self.iree_serializable = module.iree_serializable()\n self.tflite_serializable = module.tflite_serializable()\n self.function_name = function.__name__\n self.function_sourcefile = inspect.getsourcefile(function)\n source, start_line = inspect.getsourcelines(function)\n self.function_line_numbers = (start_line, start_line + len(source))\n self.function_source = \"\".join(source)\n\n self.calls = []\n else:\n self.module_name = _load_dict[\"module_name\"]\n self.compiled_paths = _load_dict[\"compiled_paths\"]\n self.backend_name = _load_dict[\"backend_name\"]\n self.backend_id = _load_dict[\"backend_id\"]\n self.backend_driver = _load_dict[\"backend_driver\"]\n self.iree_serializable = _load_dict[\"iree_serializable\"]\n self.tflite_serializable = _load_dict[\"tflite_serializable\"]\n self.function_name = _load_dict[\"function_name\"]\n self.function_sourcefile = _load_dict[\"function_sourcefile\"]\n self.function_line_numbers = _load_dict[\"function_line_numbers\"]\n self.function_source = 
_load_dict[\"function_source\"]\n self.calls = _load_dict[\"calls\"]", "def load_model(self) -> Any:", "def load(path_to_model):\n pass", "def __init__(self, load_model_dir=None):\n \n if load_model_dir:\n raise RuntimeError('Whoops. Not implemented yet')\n \n ## Load pickeled preprocessing function (applied to raw features)\n ## Load pickeled postprocessing function (applied to labels before output)\n ## Load tf model", "def load_model(model_name):\n model_def_path = os.path.join(MODEL_DIR, model_name + \".py\")\n weights_path = os.path.join(MODEL_DIR, model_name + \".pth\")\n if six.PY3:\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(model_name,\n model_def_path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n import importlib\n dirname = os.path.dirname(model_def_path)\n sys.path.insert(0, dirname)\n module_name = os.path.splitext(os.path.basename(model_def_path))[0]\n mod = importlib.import_module(module_name)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n net = modify_to_return_embeddings(net, model_name)\n return net", "def load_model(self, filename):\r\n pass", "def _load_from_file(self, filename, lambdify):\n\n expression = None\n function = None\n\n # check for / create the save folder for this expression\n folder = self.config_folder + '/' + filename\n if os.path.isdir(folder) is not False:\n # check to see should return function or expression\n if lambdify is True:\n if self.use_cython is True:\n # check for cython binaries\n saved_file = [sf for sf in os.listdir(folder)\n if sf.endswith('.so')]\n if len(saved_file) > 0:\n # if found, load in function from file\n if self.config_folder not in sys.path:\n sys.path.append(self.config_folder)\n saved_file = saved_file[0].split('.')[0]\n function_binary = importlib.import_module(\n filename + '.' + saved_file)\n function = getattr(function_binary, 'autofunc_c')\n # NOTE: This is a hack, but the above import command\n # imports both 'filename.saved_file' and 'saved_file'.\n # Having 'saved_file' in modules cause problems if\n # the cython autofunc wrapper is used after this.\n if saved_file in sys.modules.keys():\n del sys.modules[saved_file]\n\n if function is None:\n # if function not loaded, check for saved expression\n if os.path.isfile('%s/%s/%s' %\n (self.config_folder, filename, filename)):\n print('Loading expression from %s ...' 
% filename)\n expression = cloudpickle.load(open(\n '%s/%s/%s' % (self.config_folder, filename, filename),\n 'rb'))\n\n return expression, function", "def load_model(model_name, MODEL_DIR):\n model_def_path = os.path.join(MODEL_DIR, model_name + '.py')\n weights_path = os.path.join(MODEL_DIR, model_name + '.pth')\n mod = load_module_2or3(model_name, model_def_path)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n return net", "def load(\n self,\n modelLoadPath\n ):\n pass", "def load_model(self, model_path: str):", "def _load(self, funcdesc):\n # Get the module and function names\n funcdesc_elts = funcdesc.split(\".\")\n module_name = \".\".join(funcdesc_elts[:-1])\n func_name = funcdesc_elts[-1]\n\n # Get the absolute path the the xml file description\n # COMPATIBILITY: module not defined in python 2.6\n python_version = sys.version_info\n if python_version[:2] <= (2, 6):\n __import__(module_name)\n else:\n importlib.import_module(module_name)\n module = sys.modules[module_name]\n\n return getattr(module, func_name)", "def load(self):\n\n raise NotImplementedError", "def loading_function(function):\n def call_with_loaded_data(hash, loader):\n data = loader.load(hash)\n return function(data)\n call_with_loaded_data.__name__ = function.__name__\n call_with_loaded_data.__qualname__ = function.__qualname__\n return call_with_loaded_data", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_function(self):\n self._fn = from_pickle(\n read_from_disk(os.path.join(self.location, FNCT_NM))\n )\n\n if self.farmer is not None:\n if self.farmer.fn is None:\n self.farmer.fn = self._fn\n else:\n # TODO: check equality?\n raise XYZError(\n \"Trying to load this Crop's function, {}, from \"\n \"disk but its farmer already has a function \"\n \"set: {}.\".format(self._fn, self.farmer.fn)\n )", "def load_c_functions(self):\n\n # Load shared object\n lib = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/flow_forward_models.so\"))\n lib2 = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/potential_function.so\"))\n\n # Load DSF procedure flow inference\n self.infer_flows_dsf_procedure = lib.infer_flows_dsf_procedure\n self.infer_flows_dsf_procedure.restype = ctypes.c_double\n self.infer_flows_dsf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_bool,\n ctypes.c_bool]\n\n\n # Load Newton Raphson procedure flow inference\n self.infer_flows_newton_raphson = lib.infer_flows_newton_raphson\n self.infer_flows_newton_raphson.restype = None #ctypes.c_double\n self.infer_flows_newton_raphson.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t]\n\n # Load Iterative proportional filtering procedure 
flow inference\n self.infer_flows_ipf_procedure = lib.infer_flows_ipf_procedure\n self.infer_flows_ipf_procedure.restype = ctypes.c_double\n self.infer_flows_ipf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure_singly = lib.infer_flows_ipf_procedure_singly\n self.infer_flows_ipf_procedure_singly.restype = ctypes.c_double\n self.infer_flows_ipf_procedure_singly.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load potential function\n self.potential_stochastic = lib2.potential_stochastic\n self.potential_stochastic.restype = ctypes.c_double\n self.potential_stochastic.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t]", "def load_model(self, path):\n pass", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)" ]
[ "0.7480354", "0.6761297", "0.67086977", "0.67086977", "0.64833236", "0.644096", "0.63043493", "0.6161545", "0.6015046", "0.599458", "0.5907236", "0.58926547", "0.58378094", "0.58029515", "0.5723951", "0.5716107", "0.56987995", "0.56742996", "0.5650106", "0.562576", "0.5615248", "0.5592893", "0.5592893", "0.5592893", "0.5585989", "0.5572584", "0.5568863", "0.5554513", "0.5554513", "0.5554513" ]
0.747926
1
SOS factory function; returns a version-specific SensorObservationService object
def SensorObservationService(url, version='1.0.0', xml=None): if version in ['1.0', '1.0.0']: return sos100.SensorObservationService_1_0_0.__new__(sos100.SensorObservationService_1_0_0, url, version, xml) elif version in ['2.0', '2.0.0']: return sos200.SensorObservationService_2_0_0.__new__(sos200.SensorObservationService_2_0_0, url, version, xml)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_sensor(self, sensor_key):\n sensor_class, service = self.services[sensor_key]\n\n return sensor_class(self.platform_name, sensor_key, service)", "def new_instance(valid, test_mode):\n database = SensorsTable(test_mode=test_mode)\n implementations = SensorGetter.get_sensor_implementations()\n return SensorUpdate(valid, database, implementations, Epidata)", "def __init__(self, name=None, dss=28, date=None, project='SolarPatrol'):\n self.logger = logging.getLogger(logger.name+\".Observation\")\n DR.Observation.__init__(self, name=date, date=date, dss=dss, \n project=project)\n self.extended_init()\n \n #self.obs =Astronomy.Ephem.DSS(dss)\n #y,d = date.split('/')\n #self.year = int(y); self.DOY = int(d)\n #projdatapath, self.sessionpath, rawdatapath = \\\n # DR.get_obs_dirs(project, dss, self.year, self.DOY,\n # datafmt=None)", "def build_sensor(sensor: dict) -> Type[Sensor]:\n sensor_type = sensor.get('type', 'default')\n sensor['type'] = sensor_type if sensor_type is not None else 'default'\n types = {\n 'motion': MotionSensor,\n 'door': ReedSwitch,\n 'window': ReedSwitch,\n }\n\n return types.get(sensor['type'], Sensor)(sensor)", "def sensor(self):\n return ProxyList(self, OxfordITC503.Sensor, range(3))", "def _create_sensor(knx_module: XKNX, config: ConfigType) -> XknxSensor:\n return XknxSensor(\n knx_module,\n name=config[CONF_NAME],\n group_address_state=config[SensorSchema.CONF_STATE_ADDRESS],\n sync_state=config[SensorSchema.CONF_SYNC_STATE],\n value_type=config[CONF_TYPE],\n )", "def create_info_service(\n es_wrapper: ElasticsearchWrapper = Depends(create_es_wrapper),\n) -> ApiInfoService:\n\n global _instance\n if not _instance:\n _instance = ApiInfoService(es_wrapper.client)\n return _instance", "def Swahili_Speech_Recognition_Service():\n\n # ensure an instance is created only the first time the factory function is called\n if _Swahili_Speech_Recognition_Service._instance is None:\n\n _Swahili_Speech_Recognition_Service._instance = _Swahili_Speech_Recognition_Service()\n\n # Initialize the tokenizer\n _Swahili_Speech_Recognition_Service._processor = Wav2Vec2Processor.from_pretrained(\"alokmatta/wav2vec2-large-xlsr-53-sw\")\n\n # Initialize the model\n _Swahili_Speech_Recognition_Service._model = Wav2Vec2ForCTC.from_pretrained(\"alokmatta/wav2vec2-large-xlsr-53-sw\")\n\n return _Swahili_Speech_Recognition_Service._instance", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:SOS'\n self.stats['operations']['GetObservation'] = {}\n self.stats['operations']['GetObservation']['hits'] = 0\n self.stats['operations']['GetObservation']['resource'] = {}\n self.stats['operations']['GetObservation']['resource']['param'] = 'observedproperty'\n self.stats['operations']['GetObservation']['resource']['list'] = {}\n self.stats['operations']['DescribeSensor'] = {}\n self.stats['operations']['DescribeSensor']['hits'] = 0", "def serviceProvider(self, iTag, srvType, addr):\r\n return ROSServiceProvider(self, iTag, srvType, addr)", "def service(self) -> BaseService:", "def get_factory():", "def sdc_service(self) -> Service:\n if not self._sdc_service:\n self._sdc_service = Service.get_by_unique_uuid(self.model_invariant_id)\n return self._sdc_service", "def new_instance(cls,\n version: date,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'DirectLinkApisV1':\n if version is None:\n raise ValueError('version must be provided')\n\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n version,\n authenticator\n )\n 
service.configure_service(service_name)\n return service", "def __init__(self, sensor):\n self.sensor = sensor\n self.sensor.update()", "def createSensorVariable(self, product, variable_name):\r\n\r\n sensor_variable_dict = {'name': variable_name,\r\n 'dtype': None,\r\n 'vtype': 'sensor',\r\n 'units': None,\r\n 'ndims': None,\r\n 'shape': None}\r\n\r\n sensor_variable = Variable(sensor_variable_dict)\r\n\r\n return sensor_variable", "def gen_svn_updated_factory2(baseURL, configure_opts=[]):\n return core_factory(baseURL=baseURL, usedocs=True, audit=True,\n configure_opts=configure_opts)", "def get(cls) -> 'CommonService':\n raise NotImplementedError()", "def getObservation(self):\n sensors = self.env.getSensors()\n if self.sensor_limits:\n sensors = self.normalize(sensors)\n return sensors", "def ICSClientFactory(full_config, slave_device, master_device, \n icsiface_index=0, clientiface_index=0):\n slave_config = full_config['vdevs'][slave_device]\n master_config = full_config['vdevs'][master_device]\n if not slave_config['icsifaces'] or not master_config['clientifaces']:\n return None #NOTE: We may want an exception thrown here\n \n to_config = slave_config['icsifaces'][icsiface_index]\n from_config = master_config['clientifaces'][clientiface_index]\n\n className = to_config['typ']\n cls = globals()[className] #Gets class from this module\n return cls(to_config, slave_config['points'], from_config)", "def gen_svn_updated_factory(baseURL, usedocs=True, clean=False):\n return core_factory(baseURL=baseURL, usedocs=usedocs, clean=clean)", "def serviceClient(self, iTag, srvType, addr):\r\n return ROSServiceClient(self, iTag, srvType, addr)", "def init_od_sr(state_dict: Dict) -> SpectralResidual:\n od = SpectralResidual(threshold=state_dict['threshold'],\n window_amp=state_dict['window_amp'],\n window_local=state_dict['window_local'],\n n_est_points=state_dict['n_est_points'],\n n_grad_points=state_dict['n_grad_points'])\n return od", "def __getitem__(self, name):\r\n return Service(self, name)", "def get_factory():\n # Get config from Django settings\n sdk_config = settings.SPLITIO\n api_key = settings.SPLITIO.get('apiKey', '')\n\n return get_splitio_factory(\n api_key,\n config=sdk_config,\n **{k: sdk_config[k] for k in ('sdk_api_base_url', 'events_api_base_url', 'auth_api_base_url', 'streaming_api_base_url') if k in sdk_config}\n )", "def sensor(self , sensor_index):\n sensor = obd_sensors.SENSORS[sensor_index]\n try:\n r = self.get_sensor_value(sensor)\n except \"NORESPONSE\":\n r = \"NORESPONSE\"\n return (sensor.name,r, sensor.unit)", "def get_service(self):", "def create_api_service(endpoint):\n # FLOC-1162 should add an API version prefix and integration with\n # DatasetAPIUser.\n return StreamServerEndpointService(endpoint, Site(Resource()))", "def to_api_service(self):\n return DSSAPIService(self.client, self[\"projectKey\"], self[\"id\"])", "def __init__(self, coordinator, sensor):\n self._sensor = sensor\n self.coordinator = coordinator\n self._name = DOMAIN.title()\n\n self._state = None" ]
[ "0.5936116", "0.5500027", "0.54106855", "0.5394249", "0.535936", "0.532393", "0.5269507", "0.5235204", "0.52062535", "0.517184", "0.5136265", "0.51114684", "0.5089523", "0.5087797", "0.5068168", "0.50676584", "0.5035795", "0.5032768", "0.50292856", "0.50230336", "0.50151926", "0.5010246", "0.5009591", "0.50049275", "0.50016177", "0.50003284", "0.49904105", "0.49730745", "0.4970305", "0.49574324" ]
0.8053189
0
Download and save the data needed to render for builds and jobs. We ask Jenkins what builds it knows about for the given jobs, then download them all to get a `.data` file that is suitable for passing as input to this script (at some later date) to graph this build.
def download_builds(config, builds, force=False): if not config.get('datadir'): raise ValueError("No output dir (--datadir) specified") jenkins_client = jenkins.get_client(config) download_args = [] for build in builds: if ':' in build: (job, build_id) = build.split(':') download_args.append( (job, build_id, config['datadir'], jenkins_client, config.get('groupingParameter'), force) ) else: job = build for build_id in jenkins_client.fetch_all_build_ids(job): download_args.append( (job, build_id, config['datadir'], jenkins_client, config.get('groupingParameter'), force) ) num_threads = config.get('downloadThreads', 7) # arbitrary number if num_threads <= 1: for args_tuple in download_args: _download_one_build(args_tuple) else: import multiprocessing.pool # only import if we need it! pool = multiprocessing.pool.ThreadPool(num_threads) pool.map(_download_one_build, download_args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_buildings_download(request):\n body = json.loads(request.body)\n export_id = body.get('export_id')\n\n export_subdir = exporter._make_export_subdirectory(export_id)\n keys = list(DefaultStorage().bucket.list(export_subdir))\n\n if not keys or len(keys) > 1:\n return {\n \"success\": False,\n \"status\": \"error\",\n }\n\n download_key = keys[0]\n download_url = download_key.generate_url(900)\n\n return {\n 'success': True,\n \"status\": \"success\",\n \"url\": download_url\n }", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def job_download(self, job_id):\n\n target = QFileDialog.getExistingDirectory(self, 'Where to save the resulting files?')\n if target:\n paths = self.backend.job_result_download(job_id, target)\n info(self.iface, \"Successfully Downloaded to {}\".format(paths))", "def fetch_exe_from_jenkins():\n base_job_url = os.environ.get(\"JENKINS_JOB_URL\")\n if not base_job_url:\n print \"Jenkins job URL for the builder is not specified.\"\n sys.exit(-1)\n\n build_json = json.loads(requests.get(\"%s/api/json\" % base_job_url).text)\n last_build = build_json['lastCompletedBuild']['number']\n print \"Last build ID: %d\" % last_build\n\n job_url = '%s/%d' % (base_job_url, last_build)\n last_build_json = json.loads(requests.get(\"%s/api/json\" % job_url).text)\n if len(last_build_json['artifacts']) == 0:\n error(\"No artifacts found!\")\n\n artifact_url = \"%s/artifact/%s\" % (job_url, last_build_json['artifacts'][0]['relativePath'])\n file_name = last_build_json['artifacts'][0]['fileName']\n print \"Tribler installer url: %s\" % artifact_url\n\n # Download the file\n file_path = os.path.join(os.environ.get('WORKSPACE'), file_name)\n download_response = requests.get(artifact_url, stream=True)\n download_response.raise_for_status()\n\n with open(file_path, 'wb') as handle:\n for block in download_response.iter_content(1024):\n handle.write(block)\n\n return file_path", "def download_report():\n entities = get_names()\n save_csv(entities)", "def fetch_executable_from_jenkins():\n\n base_job_url = os.environ.get('JENKINS_JOB_URL')\n if not base_job_url:\n error('Jenkins job URL for the builder is not specified.')\n\n build_json = json.loads(requests.get('%s/api/json'\n % base_job_url).text)\n last_build = build_json['lastCompletedBuild']['number']\n print 'Last build ID: %d' % last_build\n\n job_url = '%s/%d' % (base_job_url, last_build)\n last_build_json = json.loads(requests.get('%s/api/json'\n % job_url).text)\n if not last_build_json['artifacts']:\n error('No artifacts found!')\n\n artifacts_deb = [artifact for artifact in\n 
last_build_json['artifacts'] if '.dmg'\n in artifact['fileName']]\n artifact_url = '%s/artifact/%s' % (job_url,\n artifacts_deb[0]['relativePath'])\n file_name = artifacts_deb[0]['fileName']\n print 'Tribler installer url: %s' % artifact_url\n\n # Download the file\n file_path = os.path.join(os.environ.get('WORKSPACE'), file_name)\n download_response = requests.get(artifact_url, stream=True)\n download_response.raise_for_status()\n\n with open(file_path, 'wb') as handle:\n for block in download_response.iter_content(1024):\n handle.write(block)\n\n return file_path", "def retrieve(cfg, jobs, filter):\n server = jenkins_utils.server_factory(cfg)\n retrieved = libjobs.retrieveJobs(server, jobs, filter)\n for job in retrieved:\n job_file = os.path.join(cfg.jobdir, job.name + '.xml')\n with open(job_file, 'w') as fh:\n fh.write(job.get_config())", "def export_buildings(request):\n body = json.loads(request.body)\n\n export_name = body.get('export_name')\n export_type = body.get('export_type')\n\n building_ids = body.get('building_ids')\n\n selected_fields = body.get('selected_fields', [])\n\n selected_building_ids = body.get('selected_buildings', [])\n\n project_id = body.get('project_id')\n\n if not body.get('select_all_checkbox', False):\n selected_buildings = get_search_query(request.user, {})\n selected_buildings = selected_buildings.filter(\n pk__in=selected_building_ids\n )\n else:\n selected_buildings = get_search_query(request.user, body)\n selected_buildings = selected_buildings.exclude(\n pk__in=selected_building_ids\n )\n\n export_id = str(uuid.uuid4())\n\n # If we receive a project ID, we don't actually want to export buildings,\n # we want to export ProjectBuildings -- but the frontend doesn't know that,\n # so we change the fieldnames on the backend instead so the exporter can\n # resolve them correctly\n if project_id:\n export_model = 'seed.ProjectBuilding'\n\n # Grab the project buildings associated with the given project id and\n # buildings list\n selected_building_ids = [\n x[0] for x in selected_buildings.values_list('pk')\n ]\n selected_buildings = ProjectBuilding.objects.filter(\n project_id=project_id,\n building_snapshot__in=selected_building_ids)\n\n # Swap the requested fieldnames to reflect the new point of reference\n _selected_fields = []\n for field in selected_fields:\n components = field.split(\"__\", 1)\n if (components[0] == 'project_building_snapshots'\n and len(components) > 1):\n _selected_fields.append(components[1])\n else:\n _selected_fields.append(\"building_snapshot__%s\" % field)\n selected_fields = _selected_fields\n else:\n export_model = 'seed.BuildingSnapshot'\n\n building_ids = [x[0] for x in selected_buildings.values_list('pk')]\n\n cache.set(\"export_buildings__%s\" % export_id, 0)\n\n tasks.export_buildings.delay(export_id,\n export_name,\n export_type,\n building_ids,\n export_model,\n selected_fields)\n\n return {\n \"success\": True,\n \"status\": \"success\",\n \"export_id\": export_id,\n \"total_buildings\": selected_buildings.count(),\n }", "def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def export_buildings_progress(request):\n body = json.loads(request.body)\n export_id = body.get('export_id')\n 
return {\n \"success\": True,\n \"status\": \"success\",\n \"buildings_processed\": cache.get(\"export_buildings__%s\" % export_id),\n }", "def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])", "def get_building_complaint_results():\n data = None\n blob = BUCKET.blob(BUILDING_COMPLAINT_FNAME)\n if blob.exists():\n blob.reload(client=STORAGE_CLIENT)\n if blob.time_created.strftime(\"%Y-%m-%d\") == datetime.now().strftime(\"%Y-%m-%d\"):\n print(\"Getting cached file \" + BUILDING_COMPLAINT_FNAME)\n return {\"data\": __retrieve_from_bucket(BUILDING_COMPLAINT_FNAME)}\n \n df = update_building_complaint_results()\n data = df.to_dict(orient=\"records\")\n return {\"data\": data}", "def download_data_and_save():\n url = 'https://github.com/djay/covidthailand/wiki/combined.csv'\n s=requests.get(url).content\n global df\n global last_updated\n df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n df.to_parquet(file_name, compression='UNCOMPRESSED')\n df.to_csv('jaydata.csv')\n last_updated = df['Date'][df.index[-1]].strftime(\"%d %B %Y\")\n\n url = 'https://raw.githubusercontent.com/wiki/djay/covidthailand/vaccinations.csv'\n s=requests.get(url).content\n global vac_df\n vac_df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n vac_df.to_parquet('vaccination.parquet', compression='UNCOMPRESSED')\n\n print(\"Data downloaded and saved successfully. Data up to \" + last_updated)", "def createReports(self, jobs, retryCount = 0):\n\n\n report = Report()\n report.addStep('testStep', 0)\n\n for job in jobs:\n #reportPath = os.path.join(job['cache_dir'], 'Report.%i.pkl' % (retryCount))\n reportPath = job['fwjr_path']\n if os.path.exists(reportPath):\n os.remove(reportPath)\n report.save(reportPath)\n\n\n return", "def download_ground_truths(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './static/temp/temp.csv')\n path2 = os.path.join(workpath, './static/BioC/temp_files/to_download.csv')\n if os.path.exists(path1):\n os.remove(path1)\n if os.path.exists(path2):\n os.remove(path2)\n username = request.session['username']\n inst = request.GET.get('institute',None)\n if inst == '':\n inst = None\n else:\n inst = str(inst)\n use = request.GET.get('usec',None)\n if use == '':\n use = None\n else:\n use = str(use)\n report_type = request.GET.get('report_type',None)\n if report_type == '':\n report_type = None\n annotation_mode = request.GET.get('mode',None)\n if annotation_mode == '':\n annotation_mode = None\n lang = request.GET.get('lang',None)\n if lang == '':\n lang = None\n else:\n lang = str(lang)\n batch = request.GET.get('batch','') # added 22/10/2021\n if batch == '' or batch == 'all':\n batch = None\n else:\n batch = int(batch)\n\n all = request.GET.get('all_gt',None)\n action = request.GET.get('action',None)\n format = request.GET.get('format',None)\n json_resp = {}\n json_resp['ground_truth'] = []\n if format == 'json' or all =='all' :\n json_resp = create_json_to_download(report_type,action,username,use,annotation_mode,inst,lang,all,batch)\n return JsonResponse(json_resp)\n\n elif format == 'csv':\n response = HttpResponse(content_type='text/csv')\n resp = 
create_csv_to_download(report_type,annotation_mode,username,use,inst,lang,action,response,batch)\n return resp\n\n elif format == 'biocxml':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n if report_type == 'pubmed':\n json_keys_to_display = ['year','authors','volume','journal']\n json_keys_to_ann = ['title','abstract']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'xml',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')\n\n elif format == 'biocjson':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'json',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')", "def UpdateBuilds(builds):\n\n # The build data file records the last build number for which we\n # generated a report. When we generate the next report, we read\n # this data and increment it to get the new data; when we finish\n # generating the reports, we write the updated values into this file.\n # NOTE: One side effect of doing this at the end: If the script\n # fails in the middle of generating a report, this data does not get\n # updated.\n with open(BUILD_DATA_FILE, 'w') as fp:\n gcc_max = 0\n llvm_max = 0\n for b in builds:\n if b[0] == GCC_ROTATING_BUILDER:\n gcc_max = max(gcc_max, b[1])\n elif b[0] == LLVM_ROTATING_BUILDER:\n llvm_max = max(llvm_max, b[1])\n else:\n fp.write('%s,%d\\n' % (b[0], b[1]))\n if gcc_max > 0:\n fp.write('%s,%d\\n' % (GCC_ROTATING_BUILDER, gcc_max))\n if llvm_max > 0:\n fp.write('%s,%d\\n' % (LLVM_ROTATING_BUILDER, llvm_max))", "def download_json(self):\n # create directories for threads and images if they don't exist\n if not self.path.is_dir():\n self.path.mkdir(parents=True)\n if not self.images_path.is_dir():\n self.images_path.mkdir(parents=True)\n\n # open file, send request and write data to a file\n with self.file.open('w') as json_file:\n try:\n json_data = json.dumps(requests.get(self.endpoint).json())\n json_file.write(json_data)\n except json.JSONDecodeError as error:\n print(\"Error fetching json: \", error)", "def build(cfg, jobs, watch):\n libjobs.buildJobs(cfg, jobs, watch)", "def get_data(self, job_id, job_outgoing_dir, **kwargs):\n job_output_path = kwargs['job_output_path']\n fs_output_path = os.path.join(self.base_dir, job_output_path)\n fs_rel_file_paths = []\n\n for root, dirs, files in os.walk(job_outgoing_dir):\n rel_path = os.path.relpath(root, job_outgoing_dir)\n if rel_path == '.':\n rel_path = ''\n fs_path = os.path.join(fs_output_path, rel_path)\n os.makedirs(fs_path, exist_ok=True)\n\n for filename in files:\n local_file_path = os.path.join(root, filename)\n if not os.path.islink(local_file_path):\n try:\n shutil.copy(local_file_path, fs_path)\n except Exception as e:\n logger.error(f'Failed to copy file {local_file_path} for '\n f'job {job_id}, detail: {str(e)}')\n raise\n fs_rel_file_paths.append(os.path.join(rel_path, filename))\n\n data = {'job_output_path': job_output_path,\n 'rel_file_paths': fs_rel_file_paths}\n return io.BytesIO(json.dumps(data).encode())", "def job_display(self, job_id):\n job = self.backend.get_job(job_id)\n process_graph_job = self.backend.job_pg_info(job_id)\n download_dir = self.backend.job_result_download(job_id)\n 
failed_files = []\n if download_dir:\n for ddir in download_dir:\n info(self.iface, \"Downloaded to {}\".format(ddir))\n result = Result(path=ddir, process_graph=process_graph_job)\n if iface.activeLayer():\n crs_background = iface.activeLayer().crs().authid()\n QSettings().setValue('/Projections/defaultBehaviour', 'useGlobal')\n QSettings().setValue('/Projections/layerDefaultCrs', crs_background)\n else:\n QSettings().setValue('/Projections/defaultBehaviour', 'useGlobal')\n QSettings().setValue('/Projections/layerDefaultCrs', 'EPSG:4326')\n\n if job.title:\n title = job.title\n else:\n title = \"NoTitle\"\n\n if not result.display(layer_name=\"{}-{}\".format(title, job.created.strftime(\"%Y-%m-%d_%H-%M-%S\"))):\n failed_files.append(ddir)\n iface.zoomToActiveLayer()\n\n if failed_files:\n warning(self.iface, \"The following result files could not be loaded to layer: {}\"\n .format(str(failed_files).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n self.refresh_jobs()", "def plot(self, job):\n # fill PlotJob with needed data if it doesn't exist\n # Plotter will look for the files it needs relative to the work directory\n # If this fails it will fall back to a baseline location if one was \n # Provided to cmake at the time this file was generated\n if job.dataPath == None :\n job.dataPath = \"Scenarios/\" + job.verificationDirectory + \"/baselines/\"\n \n if job.dataFile == None:\n job.dataFile = job.name + \"Results.zip\"\n \n if job.outputFilename==None:\n job.outputFilename=job.titleOverride+\".jpg\"\n \n if len(job.outputFilename.split(\".\"))==1:\n job.outputFilename+=\".jpg\"\n \n if job.imageWidth==None and job.imageHeight==None:\n job.imageWidth=1600\n job.imageHeight=800\n \n if not os.path.exists(job.dataPath):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not job.fontSize:\n job.fontSize=22\n \n if not os.path.exists(os.path.dirname(job.outputDir)):\n os.mkdir(os.path.dirname(job.outputDir))\n \n self.drawgraph(job,os.path.join(job.dataPath,job.dataFile),os.path.join(job.outputDir,job.outputFilename))", "def download(cls):\n cls._check_folder()\n os.chdir(cls.VIEWS_PATH)\n # iterate documents\n for doc in cls._documents:\n design_doc = doc().view()\n if design_doc is None:\n continue\n bucket_name = design_doc.bucket.name\n # iterate viewtypes (i.e. 
spatial and views)\n for view_type, views in design_doc.ddoc.iteritems():\n save_dir = '%s/%s/%s' % (bucket_name, design_doc.name, view_type)\n try:\n # remove and recreate the dir\n shutil.rmtree(save_dir, ignore_errors=True)\n os.makedirs(save_dir)\n except OSError:\n pass\n for name, view in views.iteritems():\n if isinstance(view, unicode) and view_type=='spatial':\n spatial_file = '%s/%s.spatial.js' % (save_dir, name)\n with open(spatial_file, 'w') as f:\n f.write(view)\n print 'Downloaded: %s' % spatial_file\n if isinstance(view, dict) and 'map' in view:\n map_file = '%s/%s.map.js' % (save_dir, name)\n with open(map_file, 'w') as f:\n f.write(view['map'])\n print 'Downloaded: %s' % map_file\n if isinstance(view, dict) and 'reduce' in view:\n reduce_file = '%s/%s.reduce.js' % (save_dir, name)\n with open(reduce_file, 'w') as f:\n f.write(view['reduce'])\n print 'Downloaded: %s' % reduce_file\n pass", "def import_builds_for_job(job_pk):\n job = Job.objects.get(pk=job_pk)\n\n logging.info(\"Located job %s\\n\" % job)\n\n client = job.server.get_client()\n\n logging.info(\"Using server at %s\\n\" % job.server.url)\n\n jenkins_job = client.get_job(job.name)\n\n good_build_numbers = list(jenkins_job.get_build_ids())\n logging.info(\"%s\\n\" % good_build_numbers)\n\n for build_number in good_build_numbers:\n import_build_for_job(job.pk, build_number)", "def download_build(self, name, dst_directory):\n logging.info('Not downloading build because no Filestore.')", "def build(self):\n logging.info(\"Building %s\", self.path)\n\n data = self.render()\n\n # Make sure a folder for the output path exists\n try: os.makedirs(os.path.dirname(self.paths['full-build']))\n except OSError: pass\n\n # Write the data to the output file\n f = codecs.open(self.paths['full-build'], 'w', 'utf-8')\n f.write(data)\n f.close()\n\n # Run all plugins\n self.site.pluginMethod('postBuildPage', self.site, self.paths['full-build'])", "def get_job_builds(self, job_id, started=None, finished=None,\n success=None, skipped=None, order='asc', limit=100):\n pass", "def get_json_job_details(buildurl):\n return requests.get(buildurl + \"/api/json\").json()", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def save_data(self):\n # Command to get the download data\n pass" ]
[ "0.5980997", "0.5846999", "0.58016133", "0.5684989", "0.56449175", "0.5640449", "0.56289864", "0.56019944", "0.5526899", "0.5520561", "0.55028105", "0.5481336", "0.54385823", "0.542231", "0.53897566", "0.53834313", "0.53440076", "0.5336366", "0.5249497", "0.52163655", "0.5207568", "0.5205024", "0.51963824", "0.5148947", "0.51457375", "0.5143953", "0.5141961", "0.51350075", "0.5126189", "0.51202154" ]
0.62587667
0
Asserts a site visitor can GET the `analyse` screen
def test_analyse_screen(client): path = reverse('text_analysis:analyse') response = client.get(path) assert response.status_code == 200, 'Should return an `OK` status code'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_visit(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assert response.status_code == 200", "def test_accessible(self):\n survey = Survey.objects.get(id=2)\n responses = Response.objects.filter(survey=survey)\n response = responses.all()[0]\n urls = [\n reverse(\"survey-list\"),\n reverse(\"survey-detail\", kwargs={\"id\": 2}),\n reverse(\"survey-completed\", kwargs={\"id\": 2}),\n reverse(\"survey-detail-step\", kwargs={\"id\": 2, \"step\": 1}),\n reverse(\"survey-confirmation\", kwargs={\"uuid\": response.interview_uuid}),\n ]\n for url in urls:\n self.assert_accessible(url)", "def test_visit(self, client, site, content_page):\n response = client.get(content_page.relative_url(site))\n assert response.status_code == 200", "def assert_accessible(self, url):\n try:\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.login()\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.logout()\n except Exception as exc: # pragma: no cover\n exc.args += ((url),)\n raise", "def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def test_landing_page(self):\n response = self.app.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"input\", res_txt)\n self.assertIn(\"button\", res_txt)\n self.assertIn(\"Welcome to\", res_txt)", "def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")", "def test_landing_page(self):\n # Create a test client\n client = server.app.test_client()\n\n # Use the test client to make requests\n result = client.get('/', follow_redirects=True)\n\n # Compare result.data with assert method\n self.assertIn(b'<p class=\"navbar-text\">Already have an account?</p>', \n result.data)", "def test_get_site_scans(self):\n pass", "def test_visit(self, client, site, homepage):\n response = client.get(homepage.relative_url(site))\n assert response.status_code == 200", "def _verify_page(self):", "def test_show_on_homepage(self) -> None:\n self.assert_show_on_homepage(apps.wakeup.main.Controller)", "def test_has_permission(self):\n self.assertStatusCode(self.url, 200)", "def test_has_permission(self):\n self.assertStatusCode(self.url, 200)", "def test_has_permission(self):\n self.assertStatusCode(self.url, 200)", "def test_has_permission(self):\n self.assertStatusCode(self.url, 200)", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_views_appear(self):\n\t\t\n\t\t\"\"\"\t\n\n\t\tlogging.basicConfig(filename=\"Views.log\", level=logging.INFO, filemode='w')\n\t\t\n\t\tdriver = self.driver\n\t\tself.login()\n\t\tdetect_and_pass_all_wizards(driver)\n\t\t\n\t\tclick_menu_element(driver,\"Tactical view\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Status report\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Group view\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Summary of the status groups\" in driver.page_source,True)\n\t\ttime.sleep(2)\n\t\tclick_menu_element(driver,\"Tree view\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Tree search\" in 
driver.page_source,True)\n\t\ttime.sleep(2)\n\t\tclick_menu_element(driver,\"Agent detail\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Description\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Monitor detail\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Monitor status\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Alert details\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Alert control filter\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Agent/Alert view\")\t\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Agents / Alert templates\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Agent/Module view\")\n\t\tclick_menu_element(driver,\"Module groups\")\n\t\tclick_menu_element(driver,\"Real-time graphs\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Clear graph\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Inventory\")\n\t\tclick_menu_element(driver,\"Log viewer\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Export to CSV\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"SNMP console\")\n\t\tclick_menu_element(driver,\"SNMP browser\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Starting OID\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"SNMP trap editor\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"MIB uploader\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Index of attachment/mibs\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"SNMP filters\")\n\t\tclick_menu_element(driver,\"SNMP trap generator\")\t\t\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Host address\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Network map\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"There are no network maps defined yet\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Network console\")\n\t\tclick_menu_element(driver,\"Services\")\n\t\tclick_menu_element(driver,\"Visual console\")\n\t\tclick_menu_element(driver,\"Custom reports\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create report\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Custom graphs\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Total items\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Main dashboard\")\n\t\tclick_menu_element(driver,\"Copy dashboard\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Replicate Dashboard\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Custom SQL\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create custom SQL\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"View events\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Event control filter\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Statistics\")\n\t\tclick_menu_element(driver,\"Edit my user\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Password confirmation\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"WebChat\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Send message\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"List of Incidents\")\n\t\tclick_menu_element(driver,\"Statistics\") \n\t\tclick_menu_element(driver,\"Message list\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create message\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"New message\")\n\t\tclick_menu_element(driver,\"Connected users\")\n\t\ttime.sleep(2)\n\t\tclick_menu_element(driver,\"Export data\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Source agent\" in 
driver.page_source,True)\n\t\tclick_menu_element(driver,\"Scheduled downtime\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Execution type\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Recon view\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Task name\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"File repository\")\n\t\tclick_menu_element(driver,\"IPAM\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"IPAM\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Manage agents\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create agent\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Custom fields\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create field\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Component groups\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Module categories\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create category\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Module types\")\n\t\tclick_menu_element(driver,\"Module groups\")\n\t\tclick_menu_element(driver,\"Insert Data\")\n\t\tclick_menu_element(driver,\"Resource exporting\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Export\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Resource registration\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Upload\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Manage agent groups\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create group\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Module tags\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create tag\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Enterprise ACL Setup\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Add\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Manage users\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create user\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Profile management\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Connected users\")\n\t\ttime.sleep(2)\t\n\t\tclick_menu_element(driver,\"Network components\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Free Search\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Local components\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Search\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Module templates\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Inventory modules\")\n\t\tclick_menu_element(driver,\"Manage policies\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Collections\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Duplicate config\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Replicate configuration\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Agent operations\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"In order to perform massive operations\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Module operations\")\n\t\tclick_menu_element(driver,\"Plugin operations\")\n\t\tclick_menu_element(driver,\"User operations\")\n\t\ttime.sleep(2)\n\t\tclick_menu_element(driver,\"Alert operations\")\n\t\tclick_menu_element(driver,\"Policies operations\")\n\t\tclick_menu_element(driver,\"SNMP 
operations\")\n\t\tclick_menu_element(driver,\"Satellite Operations\")\n\t\tclick_menu_element(driver,\"List of Alerts\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Alert control filter\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Templates\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Actions\")\n\t\tclick_menu_element(driver,\"Commands\")\n\t\tclick_menu_element(driver,\"List of special days\")\n\t\tclick_menu_element(driver,\"Event alerts\")\t\n\t\tclick_menu_element(driver,\"SNMP alerts\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Maintenance\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Event filters\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create new filter\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Custom events\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Update\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Event responses\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create response\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Manage servers\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Saga\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Recon task\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Plugins\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Name\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Recon script\")\n\t\tclick_menu_element(driver,\"Export targets\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Register Plugin\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Upload\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Cron jobs\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"General Setup\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Pandora FMS Language settings\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Password policy\")\n\t\tclick_menu_element(driver,\"Enterprise\")\n\t\tclick_menu_element(driver,\"Historical database\")\n\t\tclick_menu_element(driver,\"Log Collector\")\n\t\ttime.sleep(2)\n\t\tclick_menu_element(driver,\"Authentication\")\n\t\tclick_menu_element(driver,\"Performance\")\n\t\tclick_menu_element(driver,\"Visual styles\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Behaviour configuration\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"eHorus\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Enable eHorus\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Edit OS\")\n\t\tclick_menu_element(driver,\"Licence\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Request new licence\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Skins\")\n\t\tclick_menu_element(driver,\"Translate string\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Search\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"System audit log\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"User\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Links\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Link name\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Diagnostic info\")\n\t\tclick_menu_element(driver,\"Site news\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Subject\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"File manager\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Index of images\" 
in driver.page_source,True)\n\t\tclick_menu_element(driver,\"DB information\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Module data received\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Database purge\")\n\t\tclick_menu_element(driver,\"Database debug\")\n\t\ttime.sleep(2)\n\t\tclick_menu_element(driver,\"Database audit\")\n\t\tclick_menu_element(driver,\"Database events\")\n\t\tclick_menu_element(driver,\"DB Status\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"DB settings\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"DB interface\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Run SQL query\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"API checker\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"IP\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"System Info\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Generate file\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Extension uploader\")\n\t\tclick_menu_element(driver,\"File repository manager\")\t\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Groups\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"System logfiles\")\n\t\tclick_menu_element(driver,\"Backup\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Description\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"CSV import\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Upload file\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"CSV import group\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Upload file\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"IPAM\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"Create\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Update Manager offline\")\n\t\tclick_menu_element(driver,\"Update Manager online\")\n\t\ttime.sleep(2)\n\t\tself.assertEqual(\"The last version of package installed is:\" in driver.page_source,True)\n\t\tclick_menu_element(driver,\"Update Manager options\")\n\t\t\n\t\tlogging.info(\"test_views_appear is correct\")\n\n\t\t\"\"\"", "def test_get_monitor_html(self):\n response = self.setup_get_html_test('/monitor')\n self.assertEqual(response.status_code, 200)", "def test_staff_access(self):\r\n out = self.c.get(self.url)\r\n print out\r\n self.assertTrue('Hints Awaiting Moderation' in out.content)", "def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n self.get_DashboardPage(\"Server Utilization\")\n \n self.get_DashboardPage(\"Total Server Utilization\")\n \n self.logout()", "def test_important_page(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n self.assertIn(\"Email\", result.data)", "def test_analysis_screen_with_clean_text(client, text_to_analyse):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': text_to_analyse})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def tela_inicial_do_challenge_1():\r\n # primeiro\r\n _url_site = \"http://rpachallenge.com/\"\r\n _current_url = _browser.current_url\r\n\r\n assert _current_url == _url_site", "def test_01_visit(self):", "def test_news_index_has_perm(self):\n self.assertStatusCode(self.url, 200)", "def test_professor_can_login_to_web_portal(professor):", "def test_01_search(self):\r\n res = 
self.app.get('/search')\r\n err_msg = \"Search page should be accessible\"\r\n assert \"Search\" in res.data, err_msg" ]
[ "0.6595508", "0.62993777", "0.62831616", "0.62774974", "0.62224185", "0.6215889", "0.60910827", "0.6065271", "0.6050251", "0.60461843", "0.6045932", "0.5955406", "0.59379095", "0.5934326", "0.5934326", "0.5934326", "0.5934326", "0.5933294", "0.5930834", "0.59113115", "0.5897449", "0.58503103", "0.58115286", "0.5787133", "0.57815266", "0.57659334", "0.57511973", "0.57424134", "0.5739822", "0.5739493" ]
0.7581896
0
Plots margin densities for sigma and width. Expected to be uniform.
def plot_margin_densities(bgm_fit, threshold):
    bgm = bgm_fit.named_steps['bayesiangaussianmixture']
    weights = bgm.weights_          # n_components
    means = bgm.means_              # n_components x n_features
    covariances = bgm.covariances_  # n_components x n_features x n_features
    n_components = means.shape[0]
    n_features = 4
    u = np.linspace(0.0, 1.0, 100, endpoint=True)
    m_sigma = np.zeros_like(u)
    m_width = np.zeros_like(u)
    for comp in range(n_components):
        comp_weight = weights[comp]
        comp_mean_sigma = means[comp, 0]
        comp_mean_width = means[comp, 1]
        comp_std_sigma = np.sqrt(covariances[comp, 0, 0])
        comp_std_width = np.sqrt(covariances[comp, 1, 1])
        m_sigma += comp_weight * norm.pdf(u, comp_mean_sigma, comp_std_sigma)
        m_width += comp_weight * norm.pdf(u, comp_mean_width, comp_std_width)
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(12, 12))
    ax1.plot(u, m_sigma)
    ax2.plot(u, m_width)
    plt.savefig('{0}/margin_densities_thr{1}.png'.format(plotdir, threshold))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_density(sampler, threshold, sigma, width, n_random_samples = 10000):\n recX, labels = sampler.sample(n_random_samples)\n rec_t0 = recX[:,0]\n rec_amplitude = recX[:,1]\n generator.generate_pdf(threshold, sigma, width)\n fig = plt.figure(figsize = (12, 12))\n # pdf and random samples go to bottom right, margins on appropriate sides\n ax1 = plt.subplot2grid((12,12),(4,0), colspan = 9, rowspan = 8)\n pdf_map = ax1.contourf(generator.t0s, generator.amplitudes, generator.pdf, 10, cmap = 'Blues')\n ax1.scatter(rec_t0, rec_amplitude, s = 0.03, c = 'y')\n ax1.set_title('Probability density and random samples'.format(n_random_samples))\n ax1.set_xlabel('t0 [ns]')\n ax1.set_ylabel('amplitude [S/N]')\n ax1c = plt.subplot2grid((12,12), (1,9), rowspan = 3, colspan = 2)\n plt.colorbar(pdf_map, cax = ax1c, format = ticker.FuncFormatter(_fmt))\n ax2 = plt.subplot2grid((12,12),(1,0), colspan = 9, rowspan = 3, sharex = ax1)\n ax2.plot(generator.t0s[:,-1], generator.pdfu)\n ax2.hist(rec_t0, bins = generator.t0s[:,0], normed = True, alpha = 0.5)\n ax2.set_title('t0 margin distribution')\n ax2.set_ylabel('P(1 over)')\n plt.setp(ax2.get_xticklabels(), visible = False)\n ax3 = plt.subplot2grid((12,12),(4,9), rowspan = 8, colspan = 3, sharey = ax1)\n ax3.plot(generator.pdfv, generator.amplitudes[-1,:])\n ax3.hist(rec_amplitude, bins = generator.amplitudes[0,:], normed = True, orientation = 'horizontal', alpha = 0.5)\n ax3.set_title('Amplitude margin distribution')\n ax3.set_xlabel('P(1 over)')\n plt.setp(ax3.get_yticklabels(), visible = False)\n ax4 = plt.subplot2grid((12,12),(0,0), colspan = 9)\n ax4.text(0.5, 1.0, 'Exact P(one over) distribution and {0} random samples \\nthreshold : {1}, sigma : {2}, width : {3}'.format(n_random_samples, threshold, sigma, width), horizontalalignment = 'center', verticalalignment = 'top', fontsize = 18)\n ax4.set_axis_off()\n plt.tight_layout()\n plt.savefig('{0}/rng_test_thr{1}_sig{2}_w{3}.png'.format(plotdir, threshold, sigma, width))", "def draw_marginals(marg, markers=True):\n marg, doms, obs = marg\n n = len(marg)\n rows = int(math.ceil(n / 2.0))\n marg = sorted(marg.items())\n for i, (name, values) in enumerate(marg):\n if name in obs:\n plt.subplot(rows, 2, i + 1, axisbg=AXIS_OBSERVED_BG_COLOR)\n else:\n plt.subplot(rows, 2, i + 1)\n if markers:\n obj = plt.plot(values, '-o', linewidth=2, antialiased=True)\n else:\n obj = plt.plot(values, '-', linewidth=2, antialiased=True)\n for o in plt.gcf().findobj():\n o.set_clip_on(False)\n plt.ylim((0, 1))\n plt.legend(iter(obj), [name + '=' + str(d) for d in doms[name]])", "def margin(self):\r\n return self._generate_spacing_info(self.config['margin'])", "def plot_effect_sizes(self, plot_opts=dict()):\n \n if self.ndim == 1:\n fig, axes = plt.subplots(nrows=self.K, ncols=1, \n sharex=True, \n figsize=(6, 6*self.K))\n \n dmodel_color = plot_opts.get('dmodel_color', '#cc7d21')\n for i, kernel_name in enumerate(self.kernel_dict.keys()):\n summary = self.results[kernel_name].summary(b=self.b)\n# xmin, xmax = summary['es_interval'] \n pdf = summary['es_Disc']\n d_bma = summary['es_BMA'] \n xrange = summary['es_range'] \n n_interp = len(pdf)\n \n axes[i].plot(xrange, pdf, c=dmodel_color, label=r'$M_D$', \n linewidth=1.0, linestyle='--')\n axes[i].fill_between(xrange, pdf, np.zeros((n_interp)), \n alpha=0.3, color=dmodel_color)\n axes[i].axvline(x=0, linewidth=1.0, label=r'$M_C$', color='k', \n linestyle='--') \n axes[i].plot(xrange, d_bma, c='k', label=r'BMA', linewidth=1.0)\n axes[i].fill_between(xrange, d_bma, 
np.zeros((n_interp)), \n alpha=0.3, color='k')\n axes[i].set_xlim([np.min(xrange), np.max(xrange)])\n axes[i].set_ylabel('Probability density')\n axes[i].set_xlim([xrange[0], xrange[-1]])\n axes[i].set_title('{:s} kernel'.format(kernel_name))\n \n axes[-1].legend(loc='best')\n \n return fig, axes\n else:\n raise NotImplementedError('Effect size plot for D>1 is not implemented')\n # TODO: Part of this code is available in the Dutch elections example.", "def plot(sigma, strikes, dips):\n values, vectors = principal(sigma)\n sigma1, sigma2, sigma3 = vectors\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='stereonet')\n plt.hold(True)\n ax.density_contourf(strikes, dips)\n #ax.pole(strikes, dips, 'b.')\n ax.line(sigma1[0],sigma1[1], 'r^', label='sigma1', markersize=18)\n ax.line(sigma2[0],sigma2[1], 'g^', label='sigma2', markersize=18)\n ax.line(sigma3[0],sigma3[1], 'b^', label='sigma3', markersize=18)", "def test_sigma_plot():\n\n x = np.array([[1, 2]])\n P = np.array([[2, 1.2],\n [1.2, 2]])\n kappa = .1\n\n # if kappa is larger, than points shoudld be closer together\n\n sp0 = JulierSigmaPoints(n=2, kappa=kappa)\n sp1 = JulierSigmaPoints(n=2, kappa=kappa*1000)\n sp2 = MerweScaledSigmaPoints(n=2, kappa=0, beta=2, alpha=1e-3)\n sp3 = SimplexSigmaPoints(n=2)\n\n w0, _ = sp0.weights()\n w1, _ = sp1.weights()\n w2, _ = sp2.weights()\n w3, _ = sp3.weights()\n\n Xi0 = sp0.sigma_points(x, P)\n Xi1 = sp1.sigma_points(x, P)\n Xi2 = sp2.sigma_points(x, P)\n Xi3 = sp3.sigma_points(x, P)\n\n assert max(Xi1[:,0]) > max(Xi0[:,0])\n assert max(Xi1[:,1]) > max(Xi0[:,1])\n\n if DO_PLOT:\n plt.figure()\n for i in range(Xi0.shape[0]):\n plt.scatter((Xi0[i,0]-x[0, 0])*w0[i] + x[0, 0],\n (Xi0[i,1]-x[0, 1])*w0[i] + x[0, 1],\n color='blue', label='Julier low $\\kappa$')\n\n for i in range(Xi1.shape[0]):\n plt.scatter((Xi1[i, 0]-x[0, 0]) * w1[i] + x[0,0],\n (Xi1[i, 1]-x[0, 1]) * w1[i] + x[0,1],\n color='green', label='Julier high $\\kappa$')\n # for i in range(Xi2.shape[0]):\n # plt.scatter((Xi2[i, 0] - x[0, 0]) * w2[i] + x[0, 0],\n # (Xi2[i, 1] - x[0, 1]) * w2[i] + x[0, 1],\n # color='red')\n for i in range(Xi3.shape[0]):\n plt.scatter((Xi3[i, 0] - x[0, 0]) * w3[i] + x[0, 0],\n (Xi3[i, 1] - x[0, 1]) * w3[i] + x[0, 1],\n color='black', label='Simplex')\n\n stats.plot_covariance_ellipse([1, 2], P)", "def margin(x):\n s = 0.0\n for i in range(len(axes)):\n s = s + (x[i]-center[i])**2/axes[i]**2\n return s - 1.0", "def momentum_kde2_paperplot(fields):\n plt.figure(figsize=(2.65, 2.5))\n ax = plt.axes([0.18, 0.17, 0.8, 0.8])\n colorList = [med_color, high_color]\n lw = 1.5\n i = 0\n meankx_2 = []\n meankx_3 = []\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color, label='Equilibrium')\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color)\n ax.axhline(0, color='black', linestyle='--', linewidth=0.5)\n # ax.axvline(0, color='gray', linewidth=0.8, alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + 
\"E_{:.1e}.npy\".format(ee))\n\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n meankx_2.append(utilities.mean_kx(chi_2_i, electron_df))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n meankx_3.append(utilities.mean_kx(chi_3_i, electron_df))\n\n ax.plot(k_ax, kdist_2, '--', linewidth=lw, color=colorList[i], label='Cold '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n ax.plot(k_ax, kdist_3, '-', linewidth=lw,color=colorList[i], label='Warm '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n i = i + 1\n # ax.plot(k_ax, kdist_f0_3, '--', linewidth=lw, color='black', label=r'$f_0$')\n # ax.plot(meankx_2,np.mean(abs(kdist_2))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n # ax.plot(meankx_3,np.mean(abs(kdist_3))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.locator_params(axis='y', nbins=6)\n ax.locator_params(axis='x', nbins=6)\n # ax.tick_params(direction='in')\n ax.set_xlim(-0.085, 0.081)\n\n plt.xlabel(r'$\\rm k_x \\, \\, (\\AA^{-1})$')\n plt.ylabel(r'Deviational occupation $\\rm \\Delta f_{\\mathbf{k}}$')\n # plt.grid(lw=0.8, linestyle='dotted')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n # plt.ylim([-1,1])\n plt.legend(frameon=False,prop={'size':different_small_size})\n plt.savefig(pp.figureLoc+'momentum_KDE2.png', dpi=600)", "def _plot_marginal_pdfs( res, nbins=101, **kwargs):\n\tfrom matplotlib import pyplot as pl\n\timport numpy as np\n\n\tnparam = len(res.vparam_names)\n\t# nrow = np.sqrt( nparam )\n\t# ncol = nparam / nrow + 1\n\tnrow, ncol = 1, nparam\n\n\tpdfdict = _get_marginal_pdfs( res, nbins )\n\n\tfig = plt.gcf()\n\tfor parname in res.vparam_names :\n\t\tiax = res.vparam_names.index( parname )+1\n\t\tax = fig.add_subplot( nrow, ncol, iax )\n\n\t\tparval, pdf, mean, std = pdfdict[parname]\n\t\tax.plot( parval, pdf, **kwargs )\n\t\tif np.abs(std)>=0.1:\n\t\t\tax.text( 0.95, 0.95, '%s %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.01:\n\t\t\tax.text( 0.95, 0.95, '%s %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.001:\n\t\t\tax.text( 0.95, 0.95, '%s %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telse :\n\t\t\tax.text( 0.95, 0.95, '%s %.3e +- %.3e'%( parname, mean, std),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\n\tplt.draw()", "def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()", "def plotBeamSize( self, Srange, eps, delP, plane = 'x', scaleXY = 1e2, save = 0 ):\n from Tools import sigm\n from VisualSpecs import myColors as colors \n from VisualSpecs import align_yaxis\n\n condition = (self.df.S > self.Smax - Srange) & (self.df.S <= self.Smax)\n slFr = self.df[condition]\n print('slected last', Srange, 'm upstream. 
Scale factor =', scaleXY)\n # init the plot and split x\n #\n fig = plt.figure( figsize = (20,10) ); ax = fig.add_subplot(111)\n twin = ax.twinx()\n\n # plot physical aperture\n #\n maxAper = self.df.APER.max()\n print('maximum aperture found:', maxAper)\n\n ax.plot( slFr.S, slFr.APER*scaleXY, lw = 3., color = colors[11] )\n ax.plot( slFr.S, -slFr.APER*scaleXY, lw = 3., color = colors[11] )\n ax.set_ylabel('aperture [cm]'); ax.set_ylim( -(maxAper+maxAper/10)*scaleXY, (maxAper+maxAper/10)*scaleXY )\n\n \n twin.set_ylabel('beam size $\\\\sigma$ [cm]')\n \n if plane == 'x':\n\n twin.plot( slFr.S, sigm(slFr.BETX, slFr.DX, eps, delP, scaleXY), color = colors[2], label = '$\\\\sigma_x$' ) \n twin.plot( slFr.S, -sigm(slFr.BETX, slFr.DX, eps, delP, scaleXY), color = colors[2] )\n\n twin.plot( slFr.S, 10*sigm(slFr.BETX, slFr.DX, eps, delP, scaleXY), color = colors[3], ls = '--', label = '10$\\\\sigma_x$') \n twin.plot( slFr.S, -10*sigm(slFr.BETX, slFr.DX, eps, delP, scaleXY), color = colors[3], ls = '--' ) # \n\n twin.plot( slFr.S, 20*sigm(slFr.BETX, slFr.DX, eps, delP, scaleXY), color = colors[4], ls = ':', label = '20$\\\\sigma_x$' ) \n twin.plot( slFr.S, -20*sigm(slFr.BETX, slFr.DX, eps, delP, scaleXY), color = colors[4], ls = ':' ) # \n align_yaxis(ax, 0, twin, 0); twin.set_ylim( -(maxAper+maxAper/10)*scaleXY, (maxAper+maxAper/10)*scaleXY ) \n\n plt.legend() \n plt.title('horizontal beam size and physical aperture')\n if save: print('saving fig ...'); plt.savefig( self.plotpath + 'physAprt_hrzt_beamSize100m.pdf', bbox_inches = 'tight', dpi = 70)\n \n else:\n\n twin.plot( slFr.S, sigm(slFr.BETY, slFr.DY, eps, delP, scaleXY), color = colors[2], label = '$\\\\sigma_y$' ) \n twin.plot( slFr.S, -sigm(slFr.BETY, slFr.DY, eps, delP, scaleXY), color = colors[2] )\n\n twin.plot( slFr.S, 10*sigm(slFr.BETY, slFr.DY, eps, delP, scaleXY), color = colors[3], ls = '--', label = '10$\\\\sigma_y$') \n twin.plot( slFr.S, -10*sigm(slFr.BETY, slFr.DY, eps, delP, scaleXY), color = colors[3], ls = '--' ) # \n\n twin.plot( slFr.S, 20*sigm(slFr.BETY, slFr.DY, eps, delP, scaleXY), color = colors[4], ls = ':', label = '20$\\\\sigma_y$' ) \n twin.plot( slFr.S, -20*sigm(slFr.BETY, slFr.DY, eps, delP, scaleXY), color = colors[4], ls = ':' ) # \n align_yaxis(ax, 0, twin, 0); twin.set_ylim( -(maxAper+maxAper/10)*scaleXY, (maxAper+maxAper/10)*scaleXY )\n\n plt.legend()\n plt.title('vertical beam size and physical aperture')\n if save: print('saving fig ...'); plt.savefig( self.plotpath + 'physAprt_vrt_beamSize100m.pdf', bbox_inches = 'tight', dpi = 70)\n\n return fig", "def simplePlots() -> None:\r\n \r\n # Univariate data -------------------------\r\n \r\n # Make sure that always the same random numbers are generated\r\n np.random.seed(1234)\r\n \r\n # Generate data that are normally distributed\r\n x = np.random.randn(500)\r\n \r\n # Other graphics settings\r\n # Set \" context='poster' \" for printouts, and \"set_fonts(32)\"\r\n sns.set(context='notebook', style='ticks', palette='muted')\r\n \r\n # Set the fonts the way I like them\r\n set_fonts(16)\r\n \r\n # Scatter plot\r\n plt.plot(x, '.', markersize=7)\r\n plt.xlim([0, len(x)])\r\n \r\n # Save and show the data, in a systematic format\r\n printout('scatterPlot.jpg', xlabel='Datapoints', ylabel='Values', title='Scatter')\r\n \r\n # Histogram\r\n plt.hist(x)\r\n printout('histogram_plain.jpg', xlabel='Data Values',\r\n ylabel='Frequency', title='Histogram, default settings')\r\n \r\n plt.hist(x, 25, density=True)\r\n printout('density_histogram.jpg', xlabel='Data 
Values', ylabel='Probability',\r\n title='Density Histogram, 25 bins')\r\n \r\n # Boxplot\r\n # The ox consists of the first, second (middle) and third quartile\r\n set_fonts(18)\r\n plt.boxplot(x, sym='*')\r\n printout('boxplot.jpg', xlabel='Values', title='Boxplot')\r\n \r\n plt.boxplot(x, sym='*', vert=False)\r\n plt.title('Boxplot, horizontal')\r\n plt.xlabel('Values')\r\n plt.show()\r\n \r\n # Errorbars\r\n x = np.arange(5)\r\n y = x**2\r\n errorBar = x/2\r\n plt.errorbar(x,y, yerr=errorBar, fmt='o', capsize=5, capthick=3)\r\n plt.xlim([-0.2, 4.2])\r\n plt.ylim([-0.2, 19])\r\n printout('Errorbars.jpg', xlabel='Data Values', ylabel='Measurements', title='Errorbars')\r\n\r\n # SD for two groups\r\n weight = {'USA':89, 'Austria':74}\r\n weight_SD_male = 12\r\n plt.errorbar([1,2], weight.values(), yerr=weight_SD_male * np.r_[1,1],\r\n capsize=5, LineStyle='', marker='o')\r\n plt.xlim([0.5, 2.5])\r\n plt.xticks([1,2], weight.keys())\r\n plt.ylabel('Weight [kg]')\r\n plt.title('Adult male, mean +/- SD')\r\n\r\n show_data('SD_groups.jpg', out_dir='.')\r\n \r\n # Barplot\r\n # The font-size is set such that the legend does not overlap with the data\r\n np.random.seed(1234)\r\n set_fonts(16)\r\n \r\n df = pd.DataFrame(np.random.rand(7, 3), columns=['one', 'two', 'three'])\r\n df.plot(kind='bar', grid=False, color=sns.color_palette('muted'))\r\n \r\n show_data('barplot.jpg')\r\n\r\n # Bivariate Plots\r\n df2 = pd.DataFrame(np.random.rand(50, 3), columns=['a', 'b', 'c'])\r\n df2.plot(kind='scatter', x='a', y='b', s=df2['c']*500);\r\n plt.axhline(0, ls='--', color='#999999')\r\n plt.axvline(0, ls='--', color='#999999')\r\n printout('bivariate.jpg')\r\n \r\n sns.set_style('ticks')\r\n\r\n # Pieplot\r\n txtLabels = 'Cats', 'Dogs', 'Frogs', 'Others'\r\n fractions = [45, 30, 15, 10]\r\n offsets =(0, 0.05, 0, 0)\r\n \r\n plt.pie(fractions, explode=offsets, labels=txtLabels,\r\n autopct='%1.1f%%', shadow=True, startangle=90,\r\n colors=sns.color_palette('muted') )\r\n plt.axis('equal')\r\n printout('piePlot.jpg', title=' ')", "def plot2D_mass(df, sample, mass_var, mass_range, sgn, peak, pdf_key):\n\n for var in df.columns:\n if var != mass_var:\n fig, axs = plt.subplots(figsize=(15, 10))\n cax = plt.hist2d(df[mass_var],df[var],range=[mass_range, [df[var].min(), df[var].max()]], bins=100,\n norm=mpl.colors.LogNorm(), cmap=plt.cm.viridis)\n\n\n if sgn==1:\n plt.title('Signal candidates ' + sample, fontsize = 25)\n\n if sgn==0:\n plt.title('Background candidates ' + sample, fontsize = 25)\n\n\n plt.xlabel(mass_var, fontsize=25)\n plt.ylabel(var, fontsize=25)\n\n plt.vlines(x=peak,ymin=df[var].min(),ymax=df[var].max(), color='r', linestyle='-')\n\n mpl.pyplot.colorbar()\n\n plt.legend(shadow=True,title =str(len(df))+ \" samples\")\n\n fig.tight_layout()\n plt.savefig(pdf_key,format='pdf')\n pdf_key.close()", "def distribution_horizontale(args):\n number_files = [2,5,10,20];\n nbreFileNotDisplay = 0;\n comment = \"\";\n num_bins = args[\"num_bins\"];\n rep = args[\"path_save\"]+args[\"correction\"]+\\\n \"/data_p_\"+str(args[\"p_value\"])+\"/distribution/\";\n w = 4; h = 1; # width = largueur, height = longueur\n fig = plt.figure( figsize=(w,h) ); \n cpt_ax1 = 0;\n for num in number_files:\n print(\"num = \", num)\n num = int(num)\n cpt_ax1 += 1;#cpt = num; # cpt += 1\n \n # ax1\n ax1 = fig.add_subplot(2,len(number_files),cpt_ax1);\n df = pd.read_csv(rep+args[\"fichier_prefix\"] +str(num)+args[\"ext\"], \\\n names=[\"cpt\",\"moy_dc\",\"moy_dh\", \"nbre_aretes_matE\", \"correl_dh_dl\"], \\\n sep=';')\n 
N_graphs = df[\"moy_dc\"].count()\n \n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dc\"])\n num_bins = df[\"moy_dc\"].max()+1\n bins = range(0,int(num_bins)); bins = range(0, 100)\n print(\"---> bins = \", bins, \" min = \",df[\"moy_dc\"].min(), \\\n \" max = \",df[\"moy_dc\"].max())\n \n max_count_dl, max_count_dh = count_max_df(df)\n \n sns.distplot(df[\"moy_dc\"], ax = ax1, bins = bins, kde = False)\n ax1.set(xlabel= \"moy_distance_correction\", ylabel= \"nombre_graphe\", \\\n title = \"distance de correction pour \\n \"+ str(num)+\\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma)+ \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean))\n ax1.plot([num+1,num+1], (0,max_count_dl), 'r--' )\n ax1.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax1.get_yticks()])\n \n # ax2\n cpt_ax2 = cpt_ax1 +len(number_files); #cpt = num+len(number_files); # cpt +=1 ;\n ax2 = fig.add_subplot(2,len(number_files),cpt_ax2);\n N_graphs = df[\"moy_dh\"].count()\n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dh\"])\n \n num_bins = df[\"moy_dh\"].max()+1\n bins = range(0 ,int(num_bins)); bins = range(0, 100)\n\n sns.distplot(df[\"moy_dh\"], ax = ax2, bins = bins, kde = False, color = 'red')\n ax2.set(xlabel= \"moy_distance_hamming\", ylabel= \"nombre_graphe\", \\\n title = \"distance de Hamming pour \\n \"+ str(num)+ \\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma) + \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean()))\n# ax2.set_xticklabels(bins, rotation=90)\n ax2.plot([num+1,num+1], (0,max_count_dh), 'r--' )\n ax2.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax2.get_yticks()])\n \n for ax in [ax1,ax2]:\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(8)\n \n# plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.grid(True)\n comment += \"_horizontale\";\n plt.savefig(args[\"path_save\"]+args[\"correction\"]+\"/courbes/\"+\\\n \"distributionHorizontale_k_0_\"+str(number_files[len(number_files)-1])+\\\n \"_\"+comment+\".jpeg\", \\\n dpi= 190)\n pass", "def momentum_kde_paperplot(fields):\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)\n axisList = [ax1,ax2,ax3]\n i =0\n\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n textstr = r'$E_{k_x}\\, = \\, %.1f \\, V \\, cm^{-1}$' % ee_Vcm\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n axisList[i].fill(k_ax, kdist_2/np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Cold '+r'$e^{-}$ '+r'$\\Delta f$',color='blue')\n axisList[i].fill(k_ax, kdist_3/np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Warm '+r'$e^{-}$ '+r'$\\Delta f$',color='red')\n axisList[i].plot(k_ax, kdist_2/np.max(kdist_f0_2), '-', linewidth=1,color='blue')\n axisList[i].plot(k_ax, kdist_3/np.max(kdist_f0_2), '-', linewidth=1,color='red')\n axisList[i].plot(k_ax, kdist_f0_2/np.max(kdist_f0_2), '-', linewidth=1, label='Equilibrium Dist.',color='black')\n 
axisList[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))\n axisList[i].locator_params(axis='y', nbins=3)\n axisList[i].locator_params(axis='x', nbins=5)\n axisList[i].set_xlim(-0.06,0.06)\n axisList[i].text(0.02, 0.92, textstr, transform=axisList[i].transAxes, verticalalignment='top', bbox=props)\n\n i = i+1\n plt.xlabel(r'$k_x \\, \\, (\\AA^{-1})$')\n ax2.set_ylabel('Occupation Probability (norm.)')\n axisList[0].legend(loc=\"upper right\")\n plt.savefig(pp.figureLoc+'momentum_KDE.png', bbox_inches='tight',dpi=600)", "def show_mdn_posterior_with_bootstrapping():\n\n fig = plt.figure()\n all_dist = np.array([])\n\n for iter in xrange(n_bootstrap_iter):\n\n # load approximate posterior\n _, approx_posterior, _, dist = helper.load(netsdir + 'mdn_svi_proposal_prior_{0}.pkl'.format(iter))\n\n # print means and variances\n m, S = approx_posterior.calc_mean_and_cov()\n print 'mixing coefficients = {0}'.format(approx_posterior.a)\n for i in xrange(4):\n print 'log theta {0}: true = {1:.2} \\t estimate = {2:.2} +/- {3:.2}'.format(i+1, np.log(true_params[i]), m[i], 2.0 * np.sqrt(S[i, i]))\n print ''\n\n # plot marginals\n helper.plot_pdf_marginals(pdf=approx_posterior, lims=[log_prior_min, log_prior_max], gt=np.log(true_params))\n\n # plot distance histograms\n ax = fig.add_subplot(2, n_bootstrap_iter/2, iter+1)\n ax.hist(dist, bins=int(np.sqrt(dist.size)))\n ax.set_title('iteration = {0}'.format(iter+1))\n ax.set_xlim([0.0, 12.0])\n all_dist = np.append(all_dist, dist)\n\n # plot distance trace\n _, ax = plt.subplots(1, 1)\n ax.plot(all_dist, '.')\n ax.set_xlabel('summary statistics samples')\n ax.set_ylabel('distance')\n\n plt.show(block=False)", "def plot_msd(msd, h_exp):\n fig, ax = plt.subplots(1, 2, figsize = (10, 10))\n av_msd = np.mean(msd, axis = 0)\n\n for p in np.arange(0, msd.shape[0], step = 1):\n for t in np.arange(0, msd.shape[1], step = 1): \n ax[0].plot(t, msd[p, t], 'bx')\n ax[1].plot(t, av_msd[t], 'ro')\n ax[0].set_xlabel('Time lag (number of steps)')\n ax[0].set_ylabel('MSD (pix^2)')\n ax[0].set_title('Individual TAMSDs: H = ' + str(h_exp))\n ax[1].set_xlabel('Time lag (number of steps)')\n ax[1].set_ylabel('MSD (pix^2)')\n ax[1].set_title('Averaged TAMSDs: H = ' + str(h_exp)) \n ax[0].set_xlim([0, np.max(t)])\n ax[1].set_xlim([0, np.max(t)])\n ax[0].set_ylim([0, np.max(msd)]) \n ax[1].set_ylim([0, np.max(av_msd)])", "def plot_r(f=500, d=100e-3, dr=0.01, picture_file=None, picture_formats=['png', 'pdf', 'svg']):#x_axis='r', \n import matplotlib.pyplot\n i = 0\n rs = []\n sigmas = []\n ys = []\n print \"r_soll ->\\tsigma ->\\tr\"\n datas = []\n for r in numpy.arange(0, 1+dr, dr) :\n for t in [0] :\n print \"%f\\t\" %(r),\n sigma = getSigma(r)\n print \"%f\\t\" % (sigma),\n rs.append(r)\n sigmas.append(sigma)\n v = getSynapticActivity(f=f, r=r, fireing_rate=1, duration=d, delay=t)\n #print v\n #matplotlib.pyplot.scatter(v, numpy.zeros( len(v) ) + i )\n r = vector_strength(f, v)\n print \"%f\" % (r)\n ys.append(r)\n i = i+1\n datas.append([sigma,r])\n numpy.savetxt(\"../../../Data/%.1f_%f@%i.dat\" % (getSigma(dr),dr,int(f*d)), datas) \n\n matplotlib.pyplot.figure()\n matplotlib.pyplot.xlabel('sigma')\n matplotlib.pyplot.ylabel('measured vector strength')\n matplotlib.pyplot.xlim(0, getSigma(dr))\n matplotlib.pyplot.ylim(0, 1)\n matplotlib.pyplot.grid()\n matplotlib.pyplot.scatter(sigmas,ys, marker='x', color='black')#, basex=10, basey=10, ls=\"-\"\n if(picture_file != None):\n for picture_format in picture_formats:\n 
matplotlib.pyplot.savefig(picture_file+'sigma_'+str(getSigma(dr))+'_'+str(int(f*d))+'.'+picture_format,format=picture_format)\n else:\n matplotlib.pyplot.show()\n\n matplotlib.pyplot.figure()\n matplotlib.pyplot.xlabel('aimed vector strength')\n matplotlib.pyplot.ylabel('measured vector strength')\n #matplotlib.pyplot.legend([\"based on %i examples / dot\" % (f*d) ], loc='best');\n matplotlib.pyplot.xlim(0, 1)\n matplotlib.pyplot.ylim(0, 1)\n matplotlib.pyplot.grid()\n\n matplotlib.pyplot.scatter(rs,ys, marker='x', color='black')\n if(picture_file != None):\n for picture_format in picture_formats:\n matplotlib.pyplot.savefig(picture_file+'_'+str(dr)+'_'+str(int(f*d))+'.'+picture_format,format=picture_format)\n else:\n matplotlib.pyplot.show()\n\n matplotlib.pyplot.close('all')\n datas = numpy.ndarray((len(datas),2), buffer=numpy.array(datas),dtype=float)\n return datas", "def show3(dlist,r=2,c=2,greyscale=False,output=False,samerange=True):\n\n#distrib.show3((d63[:128,:128,0]-1,d0[:128,:128,0]-1,N.log(d63[:128,:128,0]),d63ga[:128,:128,0]),greyscale=True)\n\n M.clf()\n\n fig = M.figure(figsize=(6.4, 6.4), dpi=100) \n axesarr=N.array([[0.01,0.51,0.4,0.4],\n [0.51,0.51,0.4,0.4],\n [0.01,0.01,0.4,0.4],\n [0.51,0.01,0.4,0.4]])\n\n print axesarr\n colorbax = 1.*axesarr\n print colorbax\n colorbax[:,2] = 0.*colorbax[:,2] + 0.03\n colorbax[:,0] += 0.4\n\n print colorbax\n\n if greyscale:\n colorscheme='binary'\n else:\n colorscheme='jet'\n\n # d63, d0, log d63, d63g\n titlearr=[r'$\\delta$',r'$\\delta_{\\rm initial}$',r'$\\log(1+\\delta)$',r'$\\delta_{\\rm Gauss}$']\n\n if (dlist[1] != None):\n min23 = min(min(dlist[2].flatten()),min(dlist[3].flatten()))\n max23 = max(max(dlist[2].flatten()),max(dlist[3].flatten()))\n\n max0 = max(dlist[1].flatten())\n min0 = min(dlist[1].flatten())\n\n initfact = min(max23/max0,min23/min0)\n print min23,max23, initfact\n\n sc = 0\n for d in dlist:\n if (d != None):\n M.axes(axesarr[sc])\n M.title(titlearr[sc],fontsize=23)\n if (sc > 1):\n print titlearr[sc]\n if (samerange):\n M.pcolor(d,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n elif (sc == 1):\n #print min(d.flatten()*initfact),max(d.flatten()*initfact)\n if (samerange):\n M.pcolor(d*initfact,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n# if (sc == 1):\n# M.colorbar(ticks=[-0.1,-0.05,0,0.05,0.1])\n# else:\n\n M.axis('tight')\n M.axis('equal')\n M.axis('tight')\n M.xticks([])\n M.yticks([])\n\n cax = M.axes(colorbax[sc])\n M.colorbar(cax=cax)\n\n sc += 1\n\n #M.savefig('showdens.eps',dpi=8)\n #M.gcf().set_size_inches((6.4,6.4))\n #M.gcf().set_size_inches((15.,12.))\n if (output):\n if greyscale:\n M.savefig('showdens_grey.png',dpi=100)\n M.savefig('showdens_grey.pdf')\n else:\n fig.savefig('showdens.png',dpi=100)\n M.savefig('showdens.pdf')\n\n #M.show()", "def beamSigma(ax: plt.axes, trackResults, lattice):\n trackResults = trackResults.to(\"cpu\")\n\n pos = [lattice.endPositions[i % len(lattice.endPositions)] + i // len(lattice.endPositions) * lattice.totalLen\n for i in range(trackResults.size(2))]\n\n trackResults = trackResults.permute((1, 2, 0)) # dim, element, particle\n beamSigma = torch.std(trackResults, dim=2)\n\n # plt.plot(pos, beamSigma[0].numpy())\n # plt.show()\n # plt.close()\n ax.plot(pos, beamSigma[0].to(\"cpu\").numpy())\n return", "def plotDistribution(dist):\n # Create the figure\n fig, axs = plt.subplots(\n 
dpi=400,\n figsize=(3, 3),\n nrows=3,\n sharex=True,\n gridspec_kw={'height_ratios': [1, 3, 1]}\n )\n\n # Set up plot styles\n baseLineStyle = {\n \"color\": \"gray\",\n \"lw\": 0.5,\n \"ls\": \"--\",\n \"zorder\": -1\n }\n fitLineStyle = {\n \"lw\": 0.9,\n \"color\": \"red\",\n \"label\": \"Fit\"\n }\n kdeLineStyle = {\n \"lw\": 0.0,\n \"marker\": \".\",\n \"ms\": 3,\n \"color\": \"green\",\n \"label\": \"KDE\",\n }\n histLineStyle = {\n \"rwidth\": 0.9,\n \"label\": \"Bins\",\n }\n styles = {\n \"Base\": baseLineStyle,\n \"Fit\": fitLineStyle,\n \"KDE\": kdeLineStyle,\n \"Bins\": histLineStyle,\n }\n\n # Compute distribution fits\n mean, sdev = np.mean(dist), np.std(dist, ddof=1)\n\n # Kolmogorov-Smirnov test\n ksRes = stats.kstest(dist, 'norm', args=(mean, sdev))\n\n # Estimate KDE and compare to normal\n kde = smnp.KDEUnivariate(dist)\n kde.fit(kernel=\"gau\", bw=\"scott\", fft=True, gridsize=100, cut=3)\n ## Get infinitesimal step size\n deltaX = kde.support[1] - kde.support[0]\n ## Compute fitted PDF\n normal = stats.norm.pdf(kde.support, loc=mean, scale=sdev)\n ## Compute difference\n kdeDiff = (2*(kde.density-normal)/(kde.density+normal))\n normKDEDiff = np.sqrt(np.sum(kdeDiff**2)*deltaX)\n\n # Set title\n axs[0].set_title(\n \"KS Test result: Statistic = {stat:1.3f}, P-Value = {pvalue:1.3f}\".format(\n stat=ksRes.statistic, pvalue=ksRes.pvalue\n ) + \",\\nintegrated KDE difference = {normKDEDiff:1.3f}\".format(\n normKDEDiff=normKDEDiff\n )\n )\n\n # Compute fits\n yb, xb = np.histogram(dist, bins=\"fd\")\n\n #Plot PDF\n ax = axs[0]\n sns.distplot(\n dist,\n hist_kws=styles[\"Bins\"],\n kde_kws=styles[\"KDE\"],\n fit_kws=styles[\"Fit\"],\n ax=ax,\n norm_hist=True,\n fit=stats.norm\n )\n ## Axis styling\n ax.axvline(mean, label=r\"$\\mu$\", **baseLineStyle)\n ax.set_ylabel(\"PDF\")\n ax.set_yticks([])\n ax.legend([])\n\n # CDFs\n ax = axs[1]\n styles[\"KDE\"].update({\"cumulative\":True})\n styles[\"Bins\"].update({\"cumulative\":True})\n ## Plot CDFs\n ecdf = sns.distplot(\n dist,\n hist_kws=styles[\"Bins\"],\n kde_kws=styles[\"KDE\"],\n ax=ax,\n norm_hist=True\n )\n ## Get the x-range\n lines = ecdf.get_lines()[0]\n xl = lines.get_xdata()\n ## Compute the fitted CDF\n cdf = stats.norm.cdf(xl, loc=mean, scale=sdev)\n ax.plot(xl, cdf, **fitLineStyle)\n ## Styling\n ax.set_ylabel(\"CDF\")\n ax.axvline(mean, label=r\"$\\mu$\", **baseLineStyle)\n ax.axhline(0.5, **baseLineStyle)\n ax.set_yticks(np.linspace(0.25, 1, 4))\n ax.legend(loc=\"upper left\", frameon=True)\n\n # Difference plot\n ax = axs[2]\n for key in [\"KDE\", \"Bins\"]:\n styles[key].pop(\"cumulative\")\n styles[key].pop(\"label\")\n\n # Plot KDE difference\n ax.plot(kde.support, kdeDiff, **styles[\"KDE\"])\n\n # Plot bin difference\n rwidth = styles[\"Bins\"].pop(\"rwidth\")\n styles[\"Bins\"].pop(\"normed\")\n midBin = (xb[1:]+xb[:-1])/2\n yb = yb/np.sum(yb*(xb[1:]-xb[:-1]))\n pdf = stats.norm.pdf(midBin, loc=mean, scale=sdev)\n diff = 2*(yb-pdf)/(yb+pdf)\n ax.bar(\n xb[:-1]+deltaX/2,\n diff,\n width=(xb[1:]-xb[:-1])*rwidth,\n align='edge',\n **styles[\"Bins\"]\n )\n ax.set_ylabel(r\"$\\Delta$PDF\")\n\n ax.set_ylim(min(-0.1, diff.min())*1.5, max(diff.max(), 0.1)*1.5)\n\n\n ## Styling\n ax.axvline(mean, **baseLineStyle)\n baseLineStyle[\"color\"] = \"black\"\n baseLineStyle[\"ls\"] = \"-\"\n ax.axhline(0, **baseLineStyle)\n\n # General styling\n for nax, ax in enumerate(axs):\n # Labels right\n ax.yaxis.set_label_position(\"right\")\n # Ticks styling\n ax.tick_params(\n axis=\"both\",\n direction='inout',\n width=0.5,\n 
length=2.5,\n top=(nax != 0)\n )\n # set line width\n for val in ax.spines.values():\n val.set_linewidth(0.5)\n\n # Remove line width for PDF plot\n for pos in [\"left\", \"top\", \"right\"]:\n axs[0].spines[pos].set_linewidth(0)\n\n ax.set_xlim(dist.min(), dist.max())\n\n # Adjust internal plot spacings\n plt.subplots_adjust(hspace=0.0)\n\n return fig", "def plot_perc_scaling(q, sizes=np.logspace(1,2,50,dtype=int)):\n res = []\n for size in sizes:\n perc = Percolation(size, q)\n if test_perc(perc):\n num_filled = perc.num_wet() - size\n res.append((size, size**2, num_filled))\n\n sizes, cells, filled = zip(*res)\n\n options = dict(linestyle='dashed', color='gray', alpha=0.7)\n\n fig, ax = plt.subplots()\n ax.plot(sizes, cells, label='d=2', **options)\n ax.plot(sizes, filled, 'k.', label='filled')\n ax.plot(sizes, sizes, label='d=1', **options)\n\n decorate( xlabel = 'Array Size',\n ylabel = 'Cell Count',\n xscale = 'log', xlim = [9, 110],\n yscale = 'log', ylim = [9, 20000],\n loc = 'upper left')\n plt.show()\n\n for ys in [cells, filled, sizes]:\n params = linregress(np.log(sizes), np.log(ys))\n print('Slope of lines:\\n', params[0])", "def plot_insertsize():", "def plot(self, dis_type,diameter=\"*\",thickness=\"*\", loglog=False):\n if dis_type not in self.dis_types:\n print(\"Type %s does not exist, please check it\" % dis_type)\n return\n if diameter != \"*\" and (diameter not in self.diameters):\n print(\"Diameter %s does not exist, please check it\" % diameter)\n return\n if thickness != \"*\" and (thickness not in self.thicknesses):\n print(\"thickness %s does not exist, please check it\" % thickness)\n return\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('%s' % self.plotTypes[dis_type])\n if diameter != \"*\":\n if thickness != \"*\":\n ax.set_title('%s , diameter = %s nm, thickness = %s nm' % (self.plotTypes[dis_type],diameter,thickness))\n else:\n ax.set_title('%s , diameter = %s nm' % (self.plotTypes[dis_type],diameter))\n \n if (thickness != \"*\" and diameter == \"*\"):\n ax.set_title('%s , thickness = %s nm' % (self.plotTypes[dis_type],thickness))\n\n for diam in sorted(self.distrs[dis_type]):\n if (diam==diameter and diameter!=\"*\") or diameter==\"*\":\n for thick in sorted(self.distrs[dis_type][diam]):\n if (thick==thickness and thickness!=\"*\") or thickness==\"*\":\n d = self.distrs[dis_type][diam][thick]\n if thickness==\"*\" and diameter==\"*\":\n lb = \" d= %s nm, t= %s nm\" % (diam,thick)\n else:\n if diameter==\"*\":\n lb = \"d= %s nm\" % (diam)\n else:\n lb = \"t= %s nm\" % (thick)\n ax.plot(d.x, d.y, label=lb)\n \n ax.legend(numpoints=1,loc=4)\n ax.grid(True)\n # Here we need to explicity say to show the plot\n plt.show()", "def margin_size(self, value: int) -> None:\n\n if not isinstance(value, int):\n raise TypeError(\"The margin size must be an integer\")\n\n margin_spacing = (2 * value) + (2 * self._border_thickness)\n\n if margin_spacing >= self.widget_width:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's width.\"\n )\n\n if margin_spacing >= self.widget_height:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's height.\"\n )\n\n self._margin_size = value\n self._set_progress(self._progress) # For a render pass", "def plot_variance(self, ax):\n sigma = self.sigma\n S = self.S\n\n ax.plot(sigma/S**2, 'ko-', label='variance', lw=2.0)\n ax.set_yscale('log')\n ax.set_title(r'Variance 
$\\sigma^2/\\sigma_i^2$')\n ax.set_xlabel(r'$i$')\n ax.grid()", "def plot(self, figshow=None, figsave=None, figname=None):\n self.rv()\n fig, ax = plt.subplots(1,1, figsize=(8,6))\n ax.tick_params(which='major', labelsize=20, direction='in', top=True, right=True, length=6, width=1.4)\n ax.tick_params(which='minor', labelsize=20, direction='in', top=True, right=True, length=3, width=1.4)\n for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2.0)\n bins = np.linspace(min(self.mcsigmarv)*0.99, max(self.mcsigmarv)*1.01, num=100)\n posty, postx, patches = ax.hist(self.mcsigmarv, bins=bins, ec='b', color='gray', density=True)\n ax.plot([self.sigmarv, self.sigmarv], [0, max(posty)], 'r')\n ax.plot([self.sigmarv+self.sigmarvperr, self.sigmarv+self.sigmarvperr], [0, max(posty)], '--r')\n ax.plot([self.sigmarv-self.sigmarvmerr, self.sigmarv-self.sigmarvmerr], [0, max(posty)], '--r')\n minorLocator = AutoMinorLocator()\n ax.xaxis.set_minor_locator(minorLocator)\n minorLocator = AutoMinorLocator()\n ax.yaxis.set_minor_locator(minorLocator)\n ax.set_xlabel(r'$\\sigma_{\\rm rms, rv}\\ [\\rm m/s]$', fontsize=20)\n ax.set_ylabel('Probability Density', fontsize=20)\n ax.annotate(r'$\\sigma_{\\rm rms,\\ RV}$', xy=(0.45, 0.9), xycoords=\"axes fraction\", fontsize=18)\n ax.annotate(r'= {:.2f} +{:.2f} -{:.2f} [m/s]'.format(self.sigmarv, self.sigmarvperr, self.sigmarvmerr), xy=(0.58, 0.9), xycoords=\"axes fraction\", fontsize=15)\n plt.tight_layout()\n if figsave==True: plt.savefig(figname) if figname is not None else plt.savefig('rvjitter.png')\n if figshow==True: plt.show()\n plt.close('all')\n return self.sigmarv, self.sigmarvperr, self.sigmarvmerr, self.mcsigmarv", "def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')", "def fig_4():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig4.png'\n xx_plot(epoch, model, features, filters, figname)", "def distribution_horizontale_new(args):\n number_files = [2,5,10,20];\n rep = args[\"path_save\"]+args[\"correction\"]+\\\n \"/data_p_\"+str(args[\"p_value\"])+\"/distribution/\";\n fig = plt.figure(); default_size = fig.get_size_inches(); \n f, ax_arrs = plt.subplots(2, 4, figsize=(default_size[0]*2.2, \\\n default_size[1]*1.5), \\\n );\n cpt1 = 0; cpt2 = 1; tab_bins = [20, 40, 80,100, 100, 100]\n for ind, k in enumerate(number_files) :\n df = pd.read_csv(rep+args[\"fichier_prefix\"] +str(k)+args[\"ext\"], \\\n names=[\"cpt\",\"moy_dc\",\"moy_dh\", \"aretes_matE\", \"correl_dh_dl\"], \\\n sep=';')\n N_graphs = df[\"moy_dc\"].count();\n \n aretes = find_aretes(args, df)\n bins = range(0, (ind+1)*args[\"num_bins\"]);\n bins = range(0, tab_bins[ind]);\n max_count_dl, max_count_dh = count_max_df(df)\n \n # plot ax1\n (mu, sigma) = norm.fit(df[\"moy_dc\"]); # best fit of data\n sns.distplot(df[\"moy_dc\"], ax = ax_arrs[cpt1, ind], bins = bins, kde = False)\n ax_arrs[cpt1, ind].set(xlabel= \"moy_distance_correction\", \\\n ylabel= \"nombre_graphe\", \\\n title = \"distance de 
correction pour \\n \"+ str(k)+ \\\n \" cases modifiees\")\n ax_arrs[cpt1, ind].plot([k+1,k+1], (0,max_count_dl), 'r--' )\n ax_arrs[cpt1, ind].set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax_arrs[cpt1, ind].get_yticks()])\n \n #plot ax2\n (mu, sigma) = norm.fit(df[\"moy_dh\"]); # best fit of data\n sns.distplot(df[\"moy_dh\"], ax = ax_arrs[cpt2, ind], bins = bins, kde = False)\n ax_arrs[cpt2, ind].set(xlabel= \"moy_distance_correction\", \\\n ylabel= \"nombre_graphe\", \\\n title = \"distance Hamming pour \\n \"+ str(k)+\\\n \" cases modifiees\")\n ax_arrs[cpt2, ind].plot([k+1,k+1], (0,max_count_dl), 'r--' )\n ax_arrs[cpt2, ind].set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax_arrs[cpt2, ind].get_yticks()])\n \n# fig = ax_arrs[0,0].figure ;\n# fig.text(0.5,0.04, \"Some very long and even longer xlabel\", ha=\"center\", va=\"center\")\n# fig.text(0.05,0.5, \"Some quite extensive ylabel\", ha=\"center\", va=\"center\", rotation=90)\n\n \n plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.grid(True)\n plt.savefig(args[\"path_save\"]+args[\"correction\"]+\"/courbes/\"+\\\n \"distributionMoyDCDHp05k1251020.jpeg\",\\\n dpi= 250) #190 " ]
[ "0.6333102", "0.6179185", "0.60010207", "0.5960666", "0.5759967", "0.5678299", "0.55475837", "0.554194", "0.5537761", "0.5533913", "0.55258965", "0.5466195", "0.5440112", "0.54317516", "0.5392096", "0.53833497", "0.53494036", "0.5318962", "0.53053164", "0.5287674", "0.52817345", "0.5281321", "0.527806", "0.5275696", "0.52749395", "0.5252344", "0.5248197", "0.5227036", "0.5211542", "0.52057064" ]
0.6800891
0
View to handle connecting existing django accounts with facebook
def facebook_connect(request, template='socialregistration/facebook.html', extra_context=dict()):
    # for facebook the login is done in JS, so by the time it hits our view here there is no
    # redirect step. Look for the querystring values and use that instead of worrying about session.
    connect_object = get_object(request.GET)

    if getattr(request.facebook, 'user', False):
        # only go this far if the user authorized our application and there is user info
        if connect_object:
            # this exists so that social credentials can be attached to any arbitrary object using the same callbacks.
            # Under normal circumstances it will not be used. Put an object in request.session named
            # 'socialregistration_connect_object' and it will be used instead.
            # After the connection is made it will redirect to request.session value
            # 'socialregistration_connect_redirect' or settings.LOGIN_REDIRECT_URL or /
            try:
                # get the profile for this facebook UID and connected object
                profile = FacebookProfile.objects.get(uid=request.facebook.uid,
                    content_type=ContentType.objects.get_for_model(connect_object.__class__),
                    object_id=connect_object.pk)
                profile.consumer_key = request.facebook.user['access_token']
                profile.secret = request.facebook.user['secret']
                profile.save()
            except FacebookProfile.DoesNotExist:
                FacebookProfile.objects.create(content_object=connect_object, uid=request.facebook.uid,
                    consumer_key=request.facebook.user['access_token'],
                    consumer_secret=request.facebook.user['secret'])
        else:
            if request.facebook.uid is None or request.user.is_authenticated() is False:
                extra_context.update(dict(error=FB_ERROR))
                return render_to_response(template, extra_context, context_instance=RequestContext(request))
            try:
                profile = FacebookProfile.objects.get(uid=request.facebook.uid,
                    content_type=ContentType.objects.get_for_model(User))
                profile.consumer_key = request.facebook.user['access_token']
                profile.secret = request.facebook.user['secret']
                profile.save()
            except FacebookProfile.DoesNotExist:
                profile = FacebookProfile.objects.create(content_object=request.user, uid=request.facebook.uid,
                    consumer_key=request.facebook.user['access_token'],
                    consumer_secret=request.facebook.user['secret'])
    else:
        messages.info(request, "You must authorize the Facebook application in order to link your account.")
        try:
            redirect = request.META['HTTP_REFERER']  # send them where they came from
        except KeyError:
            redirect = _get_next(request)  # and fall back to what the view would use otherwise
        return HttpResponseRedirect(redirect)

    return HttpResponseRedirect(_get_next(request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def facebook_authentication(request):\n try:\n code = request.GET['code']\n except:\n messages.error('There is some problem in connecting with facebook at the moment, Please try sign up with email')\n return HttpResponseRedirect('/')\n\n try:\n access_token = get_facebook_access_token(code)\n except:\n return HttpResponseRedirect(settings.ERROR_REDIRECT_URL)\n\n try:\n profile_response_json, profile_response_dict = get_facebook_user_info(access_token)\n except:\n return HttpResponseRedirect(settings.ERROR_REDIRECT_URL)\n\n member_id = str(profile_response_dict['id'])\n\n try:\n facebook_profile = FacebookProfile.objects.get(facebook_id=member_id)\n user = facebook_profile.user\n\n if access_token != facebook_profile.access_token:\n facebook_profile.access_token = access_token\n facebook_profile.save()\n\n except FacebookProfile.DoesNotExist:\n if not 'email' in profile_response_dict:\n request.session['profile_response_dict'] = profile_response_dict\n request.session['profile_response_json'] = profile_response_json\n request.session['member_id'] = member_id\n request.session['access_token'] = access_token\n\n return HttpResponseRedirect(reverse('facebook_email_form'))\n else:\n try:\n user = User.objects.get(email__iexact=profile_response_dict['email'])\n except User.DoesNotExist:\n user = User.objects.create_user(\n username=create_username(profile_response_dict['name']),\n email=profile_response_dict['email'],\n first_name=profile_response_dict['name'],\n )\n\n FacebookProfile.objects.create(\n user=user,\n facebook_id=member_id,\n access_token=access_token,\n profile_data=profile_response_json,\n )\n\n user.backend = \"django.contrib.auth.backends.ModelBackend\"\n login(request, user)\n\n return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)", "def social_connection(request):\n backend_map = {'facebook': {'name': 'facebook', 'connected': False,\n 'dc_url': reverse('social:disconnect', kwargs={'backend': 'facebook'})},\n 'google': {'name': 'google-oauth2', 'connected': False,\n 'dc_url': reverse('social:disconnect', kwargs={'backend': 'google-oauth2'})}\n }\n accounts = UserSocialAuth.objects.filter(user=request.user)\n\n for account in accounts:\n for k, v in backend_map.iteritems():\n if v['name'] == account.provider:\n backend_map[k]['connected'] = True\n\n return render_to_response('base/social_account.html',\n context_instance=RequestContext(request, {'accounts': backend_map}))", "def homefb(request):\n if request.user.is_authenticated():\n return HttpResponse(\"{0} <a href='/accounts/logout'>exit</a>\".format(request.user))\n else:\n return HttpResponse(\"<a href='/login/facebook/?next=https://chepolina.pythonanywhere.com/'>login with Facebook</a>\")", "def facebook_login(request):\n permissions = (\", \").join([permission for permission in settings.FACEBOOK_EXTENDED_PERMISSIONS])\n url = \"https://graph.facebook.com/oauth/authorize?client_id=%s&redirect_uri=%s&scope=%s\" \\\n % (settings.FACEBOOK_CLIENT_ID, facebook_redirect_url, permissions)\n\n return HttpResponseRedirect(url)", "def login(request):\n args = {\n 'client_id': settings.FACEBOOK_APP_ID,\n 'scope': settings.FACEBOOK_SCOPE,\n 'redirect_uri': request.build_absolute_uri(\\\n reverse('horizon.facebook.views.authentication_callback')\n )\n }\n return HttpResponseRedirect('https://www.facebook.com/dialog/oauth?' 
+ urllib.urlencode(args))", "def fb_registration(request):\n if request.POST:\n if 'signed_request' in request.POST:\n # parse and check data\n data = parse_signed_request(request.POST['signed_request'], FACEBOOK_CONNECT_SECRET)\n\n # lets try to check if user exists based on username or email\n try:\n check_user = User.objects.get(username=data['registration']['name'])\n except:\n pass\n else:\n return HttpResponseRedirect('/user/login/')\n\n try:\n check_user = User.objects.get(email=data['registration']['email'])\n except:\n pass\n else:\n return HttpResponseRedirect('/user/login/')\n\n # user does not exist. We create an account\n # in this example I assume that he will login via Facebook button or RPXnow\n # so no password is needed for him - using random password\n randompass = ''.join([choice('1234567890qwertyuiopasdfghjklzxcvbnm') for i in range(7)])\n user = User.objects.create_user(data['registration']['name'], data['registration']['email'], randompass)\n user.save()\n user = authenticate(username=data['registration']['name'], password=randompass)\n if user is not None:\n # save in user profile his facebook id. In this case for RPXNow login widget\n fbid = data['user_id']\n fbid=int(fbid)\n r = Person(user=user, identifier=fbid)\n r.save()\n login(request, user)\n return render_to_response('register.html', {}, context_instance=RequestContext(request))\n\n return render_to_response('register.html', {}, context_instance=RequestContext(request))", "def facebook_login():\n if not facebook.authorized:\n return redirect(url_for('facebook.login'))\n account_info = facebook.get('me?fields=id,name,email')\n# print(account_info)\n if account_info.ok:\n account_info_json = account_info.json()\n user = {}\n user['email'] = account_info_json['email']\n user['firstName'] = account_info_json['name'].split()[0]\n user['lastName'] = account_info_json['name'].split()[1]\n return third_party_user_handler(user['email'],user['firstName'], user['lastName'], 'facebook')", "def fbconnect():\n\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n access_token = request.data\n access_token = access_token.decode(\"utf-8\")\n # Exchange client token for long-lived server-side token with GET \n # /oauth/access_toke?grant_type=fb_exchange_token&client_id={app-id}&\n # client_secret={app-secret}&fb_exchange_token={short-lived-token}\n app_id = json.loads(open('fb_client_secrets.json','r').read())['web']['app_id']\n app_secret = json.loads(open('fb_client_secrets.json','r').read())['web']['app_secret']\n url = \"https://graph.facebook.com/oauth/access_token?grant_type=\"+\\\n \"fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s\"\\\n % (app_id, app_secret, access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n result_string = result.decode(\"utf-8\")\n # Use token to get user info from API\n userinfo_url = \"https://graph.facebook.com/v3.2/me\"\n # Strip expire tag from access token\n token = result_string.split(',')[0].split(':')[1].replace('\"', '')\n url = \"https://graph.facebook.com/v3.2/\"+\\\n \"me?access_token=%s&fields=name,id,email\" % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n result_string = result.decode(\"utf-8\")\n data = json.loads(result_string)\n login_session['provider'] = 'facebook'\n login_session['username'] = data[\"name\"]\n login_session['email'] = data[\"email\"]\n 
login_session['facebook_id'] = data[\"id\"]\n # Get user picture\n url = \"https://graph.facebook.com/v3.3/\"+\\\n \"me?access_token=%s&fields=picture\" % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data = json.loads(result)\n login_session['picture'] = data[\"picture\"][\"data\"][\"url\"]\n # See if user exists\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n output = ''\n output += '<div class=\"container text-center\">'+\\\n '<div class=\"row justify-content-md-center\">'+\\\n '<div class=\"col-md-8 border p-1 m-1\">'+\\\n '<pclass=\"m-1\">Welcome, '\n output += login_session['username']\n output += '!</p>'\n output += '<div class=\"d-flex justify-content-center m-1\">'+\\\n '<img class=\"rounded mx-auto d-block\" width=\"30%\" src=\"'\n output += login_session['picture']\n output += '\"></div></div></div></div>'\n return output", "def facebook_tokens(request):\n fbmanager = FacebookManager()\n projects = Project.objects.all().exclude(fanpage_id='0')\n for project in projects:\n if project.fanpage_token in ('', '0'):\n token = fbmanager.get_access_token_token(project.fanpage_id)\n project.fanpage_token = token\n project.save()\n return render_to_response('fbtokens.html',\n {'projects': projects},\n context_instance=RequestContext(request))", "def facebook_login(request, template='socialregistration/facebook.html',\n extra_context=dict(), account_inactive_template='socialregistration/account_inactive.html'):\n if request.facebook.uid is None:\n extra_context.update(dict(error=FB_ERROR))\n return render_to_response(template, extra_context,\n context_instance=RequestContext(request))\n\n user = authenticate(uid=request.facebook.uid)\n\n if user is None:\n request.session['socialregistration_user'] = User()\n request.session['socialregistration_profile'] = FacebookProfile(uid=request.facebook.uid)\n request.session['next'] = _get_next(request)\n return HttpResponseRedirect(reverse('socialregistration_setup'))\n\n if not user.is_active:\n return render_to_response(account_inactive_template, extra_context,\n context_instance=RequestContext(request))\n\n login(request, user)\n\n return HttpResponseRedirect(_get_next(request))", "def pre_social_login(self, request, sociallogin):\n\n if sociallogin.is_existing:\n return\n\n email_addresses = sociallogin.email_addresses\n\n for email in email_addresses:\n try:\n user_email = EmailAddress.objects.get(email__iexact=email.email)\n except EmailAddress.DoesNotExist:\n continue\n\n user = user_email.user\n sociallogin.connect(request, user)", "def fbconnect():\n\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n access_token = request.data\n print \"access token received %s \" % access_token\n app_id = FB_CLIENT_ID\n app_secret = json.loads(open('fb_client_secrets.json',\n 'r').read())['web']['app_secret']\n url = 'https://graph.facebook.com/oauth/access_token?'\n url += 'grant_type=fb_exchange_token&client_id=%s&' % (app_id)\n url += 'client_secret=%s&fb_exchange_token=%s' % (app_secret, access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n # Use token to get user info from API\n userinfo_url = \"https://graph.facebook.com/v2.8/me\"\n '''\n Due to the formatting for the result from the server token exchange we\n have to split the token first on commas and select the 
first index\n which gives us the key : value for the server access token then we\n split it on colons to pull out the actual token value and replace the\n remaining quotes with nothing so that it can be used directly in the\n graph api calls\n '''\n token = result.split(',')[0].split(':')[1].replace('\"', '')\n url = 'https://graph.facebook.com/v2.8/me'\n url += '?access_token=%s&fields=name,id,email' % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n\n # print \"url sent for API access:%s\"% url\n # print \"API JSON result: %s\" % result\n data = json.loads(result)\n login_session['provider'] = 'Facebook'\n login_session['username'] = data[\"name\"]\n login_session['email'] = data[\"email\"]\n login_session['facebook_id'] = data[\"id\"]\n\n # The token must be stored in the login_session in order to properly logout\n login_session['access_token'] = token\n\n # Get user picture\n url = 'https://graph.facebook.com/v2.8/me/picture?access_token='\n url += '%s&redirect=0&height=200&width=200' % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data = json.loads(result)\n login_session['picture'] = data[\"data\"][\"url\"]\n\n # see if user exists\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n output = ''\n output += '<h1>Welcome, '\n output += login_session['username']\n output += '!</h1>'\n output += '<img src=\"'\n output += login_session['picture']\n output += ' \" style = \"width: 300px; height: 300px;border-radius: 150px;'\n output += '-webkit-border-radius: 150px;-moz-border-radius: 150px;\"> '\n flash(\"Now logged in as %s\" % login_session['username'])\n return output", "def get_facebook_token(request):\n url = request.query_params.get('url', None)\n if url is None:\n raise ValidationError(\"No callback URL specified\")\n\n user_id = request.query_params.get('user', None)\n if user_id is None:\n raise ValidationError(\"No user specified on the URL\")\n\n academy = request.query_params.get('a', None)\n if academy is None:\n raise ValidationError(\"No academy specified on the URL\")\n\n url = base64.b64decode(url).decode(\"utf-8\")\n # Missing scopes!! 
admin.invites:write, identify\n scopes = (\"email\",\n \"ads_read\", \"business_management\", \"leads_retrieval\", \"pages_manage_metadata\", \"pages_read_engagement\",\n )\n query_string = f'a={academy}&url={url}&user={user_id}'.encode(\"utf-8\")\n payload = str(base64.urlsafe_b64encode(query_string), \"utf-8\")\n params = {\n \"client_id\": os.getenv('FACEBOOK_CLIENT_ID', \"\"),\n \"redirect_uri\": os.getenv('FACEBOOK_REDIRECT_URL', \"\"),\n \"scope\": \",\".join(scopes),\n \"state\": payload\n }\n redirect = \"https://www.facebook.com/v8.0/dialog/oauth?\"\n for key in params:\n redirect += f\"{key}={params[key]}&\"\n\n if settings.DEBUG:\n return HttpResponse(f\"Redirect to: <a href='{redirect}'>{redirect}</a>\")\n else:\n return HttpResponseRedirect(redirect_to=redirect)", "def facebook_signin(access_token):\n # verify access token has been requested with the correct app id\n resp = requests.get(\n \"https://graph.facebook.com/app/\",\n params={'access_token': access_token}\n )\n data = resp.json()\n\n if resp.status_code != 200:\n return data, resp.status_code\n\n if data['id'] != current_app.config['OAUTH_CREDENTIALS']['facebook']['id']:\n return \"Authentication failed.\", 401\n\n # get user profile\n resp = requests.get(\n \"https://graph.facebook.com/me\",\n params={'access_token': access_token, 'fields': 'id,email,name,first_name,last_name,gender,picture'}\n )\n me = resp.json()\n\n if resp.status_code != 200:\n return me, resp.status_code\n\n if 'email' not in me:\n return 'Email information not provided, cannot register user', 401\n\n # check if user exists with its email as unique identifier\n user = User.get_byemail(me['email'])\n if not user:\n # primary user doesn't exists, creating it\n user = User.add_user(\n name=me['name'],\n first_name=me['first_name'],\n last_name=me['last_name'],\n email=me['email'],\n gender=me.get('gender', None),\n image_url=me[\"picture\"][\"data\"][\"url\"] if me.get('picture') else '')\n else:\n # if already exists just update with a new apikey and profile pic\n user.update_apikey(User.generate_apikey(user.email))\n user.update_profile(image_url=me[\"picture\"][\"data\"][\"url\"] if me.get('picture') else '')\n # known facebook account ?\n auth_id = 'facebook${}'.format(me['id'])\n user_auth = UserAuth.exists(auth_id)\n\n if not user_auth:\n # add user auth informations\n UserAuth.add_userauth(\n user_id=user.id,\n name=user.name,\n auth_id=auth_id,\n email=user.email,\n auth_type='facebook',\n fullprofile=me\n )\n\n # login user (powered by flask-login)\n login_user(user, True)\n\n resp = {\n 'auth_id': auth_id,\n }\n resp.update(user.json)\n\n return resp, 200", "def openid_callback(request, template='socialregistration/openid.html',\n extra_context=dict(), account_inactive_template='socialregistration/account_inactive.html'):\n client = OpenID(\n request,\n 'http%s://%s%s' % (\n _https(),\n Site.objects.get_current().domain,\n reverse('openid_callback')\n ),\n request.session.get('openid_provider')\n )\n\n if client.is_valid():\n identity = client.result.identity_url\n\n if 'socialregistration_connect_object' in request.session and request.session['socialregistration_connect_object'] != None:\n # this exists so that social credentials can be attached to any arbitrary object using the same callbacks.\n # Under normal circumstances it will not be used. 
Put an object in request.session named 'socialregistration_connect_object' and it will be used instead.\n # After the connection is made it will redirect to request.session value 'socialregistration_connect_redirect' or settings.LOGIN_REDIRECT_URL or /\n try:\n # get the profile for this facebook UID and type of connected object\n profile = OpenIDProfile.objects.get(identity=identity, content_type=ContentType.objects.get_for_model(request.session['socialregistration_connect_object'].__class__), object_id=request.session['socialregistration_connect_object'].pk)\n except OpenIDProfile.DoesNotExist:\n OpenIDProfile.objects.create(content_object=request.session['socialregistration_connect_object'], identity=identity)\n\n del request.session['socialregistration_connect_object']\n else:\n if request.user.is_authenticated():\n # Handling already logged in users just connecting their accounts\n try:\n profile = OpenIDProfile.objects.get(identity=identity, content_type=ContentType.objects.get_for_model(User))\n except OpenIDProfile.DoesNotExist: # There can only be one profile with the same identity\n profile = OpenIDProfile.objects.create(content_object=request.user,\n identity=identity)\n\n return HttpResponseRedirect(_get_next(request))\n\n user = authenticate(identity=identity)\n if user is None:\n request.session['socialregistration_user'] = User()\n request.session['socialregistration_profile'] = OpenIDProfile(\n identity=identity\n )\n return HttpResponseRedirect(reverse('socialregistration_setup'))\n\n if not user.is_active:\n return render_to_response(\n account_inactive_template,\n extra_context,\n context_instance=RequestContext(request)\n )\n\n login(request, user)\n return HttpResponseRedirect(_get_next(request))\n\n return render_to_response(\n template,\n dict(),\n context_instance=RequestContext(request)\n )", "def login_view(request):\n try:\n #user = Person.objects.filter(username__iexact=\"gerstem5\")#Bypass basf auth\n #django_login(request, user[0])\n #return HttpResponseRedirect(request.GET[\"next\"])\n\n next = request.GET[\"next\"]\n url = basf_login + \"?redirect_uri=\" + redirect_uri\n url += \"?next=\"\n url += quote(next, safe=\"\")\n return HttpResponseRedirect(url)\n except:\n return HttpResponse(\"Bad Request\",status=400)", "def login_facebook():\n # Prevent a CSRF attack from replacing a logged-in user's account with the\n # attacker's.\n current_user = view_helpers.get_current_user()\n if current_user:\n return api_util.jsonify({'message': 'A user is already logged in.'})\n\n rmclogger.log_event(\n rmclogger.LOG_CATEGORY_API,\n rmclogger.LOG_EVENT_SIGNUP, {\n 'type': rmclogger.LOGIN_TYPE_STRING_FACEBOOK,\n },\n )\n\n req = flask.request\n fb_access_token = req.form.get('fb_access_token')\n\n # We perform a check to confirm the fb_access_token is indeed the person\n # identified by fbid, and that it was our app that generated the token.\n token_info = facebook.get_access_token_info(fb_access_token)\n\n if not token_info['is_valid'] or not token_info.get('user_id'):\n raise api_util.ApiForbiddenError(\n 'The given FB credentials are invalid.')\n\n fbid = str(token_info['user_id'])\n user = m.User.objects(fbid=fbid).first()\n\n if not user:\n raise api_util.ApiForbiddenError('No user with fbid %s exists. 
'\n 'Create an account at uwflow.com.'\n % fbid)\n\n view_helpers.login_as_user(user)\n # TODO(sandy): We don't need to do this anymore, just use the endpoint\n csrf_token = view_helpers.generate_csrf_token()\n\n return api_util.jsonify({\n 'message': 'Logged in user %s' % user.name,\n 'csrf_token': csrf_token,\n })", "def facebook_login(request):\n state = persist_state(request)\n\n params = {\n 'client_id': facebook_conf(request, 'client_id'),\n 'redirect_uri': request.route_url(token.name),\n 'state': state\n }\n\n login_form_url = '{}?{}'.format(facebook_conf(request, 'authorization_endpoint'),\n urlencode(params))\n\n request.response.status_code = 302\n request.response.headers['Location'] = login_form_url\n\n return {}", "def oauth_login(provider):\n\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n graph_api_url = 'https://graph.facebook.com/v2.5/me?fields=email,'\\\n 'first_name,last_name,id,picture.type(large)'\n\n params = {\n 'client_id': request.json['clientId'],\n 'redirect_uri': request.json['redirectUri'],\n 'client_secret': app.config['OAUTH_CREDENTIALS']['facebook']['secret'],\n 'code': request.json['code']\n }\n\n resource = requests.get(access_token_url, params=params)\n access_token = dict(parse_qsl(resource.text))\n resource = requests.get(graph_api_url, params=access_token)\n profile = json.loads(resource.text)\n logger.info(profile['picture']['data']['url'])\n\n user = ecomap_user.facebook_register(profile['first_name'],\n profile['last_name'],\n profile['email'],\n provider,\n profile['id'])\n\n db.insert_user_avatar(user.uid, profile['picture']['data']['url'])\n\n login_user(user, remember=True)\n\n response = jsonify(id=user.uid,\n name=user.first_name,\n surname=user.last_name,\n role=user.role, iat=\"???\",\n token=user.get_auth_token(),\n email=user.email)\n\n return response", "def done(request):\n #print (\"done\")\n done_context = context()\n done_context['user_id'] = request.user.social_auth.get(provider='facebook').uid\n return done_context", "def facebook_email_form(request):\n if not 'profile_response_dict' in request.session:\n return HttpResponseRedirect('/')\n\n if request.method == 'POST':\n profile_response_dict = request.session['profile_response_dict']\n form = EmailForm(request.POST)\n\n if form.is_valid():\n try:\n user = User.objects.get(email__iexact=form.cleaned_data['email'])\n except User.DoesNotExist:\n user = User.objects.create_user(\n username=create_username(profile_response_dict['name']),\n email=form.cleaned_data['email'],\n first_name=profile_response_dict['name'],\n )\n\n FacebookProfile.objects.create(\n user=user,\n facebook_id=request.session['member_id'],\n access_token=request.session['access_token'],\n profile_data=request.session['profile_response_json'],\n )\n\n user.backend = \"django.contrib.auth.backends.ModelBackend\"\n login(request, user)\n\n return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)\n else:\n form = EmailForm()\n\n context = {\n 'form': form\n }\n\n return render_to_response('facebook_email_form.html', context, context_instance=RequestContext(request))", "def authenticate(self, facebook_id=None):\n print facebook_id\n if facebook_id:\n user, created = User.objects.get_or_create(username=facebook_id)\n return user\n return None", "def account_profile(request):\n get_or_creat(request)\n return redirect(\"/\")", "def connected_apps(request):\n\n context = _create_profile_context(request)\n context['active_tab'] = 'connected_apps'\n\n if request.method == 'POST':\n if not 'token_id' 
in request.POST:\n messages.error(request, _(u'Det ble ikke oppgitt noen tilgangsnøkkel i forespørselen.'))\n else:\n try:\n pk = int(request.POST['token_id'])\n token = get_object_or_404(AccessToken, pk=pk)\n token.delete()\n messages.success(request, _(u'Tilgangsnøkkelen ble slettet.'))\n except ValueError:\n messages.error(request, _(u'Tilgangsnøkkelen inneholdt en ugyldig verdi.'))\n\n return render(request, 'profiles/index.html', context)", "def login(request):\n # Redirect succesful logins to `next` if set.\n # Failing that `redirect_url`.\n # Failing that, LOGIN_REDIRECT_URL from settings.py.\n redirect_uri = post_or_get(\n request, 'next', fallback=post_or_get(\n request, 'redirect_url', fallback=settings.LOGIN_REDIRECT_URL))\n redirect_absolute_uri = add_query_params_to_url(\n request.build_absolute_uri(redirect_uri),\n {'auth_user': request.user.get_username()})\n\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username'].lower()\n password = form.cleaned_data['password']\n user = datahub_authenticate(username, password)\n if user is not None and user.is_active:\n django_login(request, user)\n # Append auth_user to redirect_uri so apps like Kibitz can\n # pull the username out of the redirect. This should be\n # removed when Thrift is removed from DataHub.\n redirect_uri = add_query_params_to_url(\n redirect_uri, {'auth_user': request.user.get_username()})\n return HttpResponseRedirect(redirect_uri)\n else:\n form.add_error(None, \"Username and password do not match.\")\n else:\n # Form isn't valid. Fall through to return it to the user with\n # errors.\n pass\n else:\n form = LoginForm()\n\n providers = provider_details()\n context = RequestContext(request, {\n 'request': request,\n 'user': request.user,\n 'form': form,\n 'providers': providers,\n 'next': redirect_uri,\n 'absolute_next': redirect_absolute_uri})\n return render_to_response('login.html', context_instance=context)", "def facebook(self, facebook):\n\n self._facebook = facebook", "def test_facebook_login(self):\n response = self.client.get('/login/facebook/')\n self.assertEqual(response.status_code, 302)", "def open_facebook(account_type='buyer'):\n try:\n facebook_driver = open_new_window()\n facebook_driver.get(\"https://www.facebook.com/\")\n \n email = facebook_driver.find_element_by_id('email')\n password = facebook_driver.find_element_by_id('pass')\n login = facebook_driver.find_element_by_id('loginbutton')\n\n user_email = config.facebook_accounts[account_type]['facebook_email']\n user_password = config.facebook_accounts[account_type]['password']\n email.send_keys(user_email)\n password.send_keys(user_password)\n login.click()\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def facebook_login(self):\r\n self.driver.get(self.facebook_friends_url)\r\n input_email = self.driver.find_element_by_id(\"m_login_email\")\r\n input_password = self.driver.find_element_by_id(\"m_login_password\")\r\n btn_login = self.driver.find_element_by_name(\"login\")\r\n input_email.send_keys(self.facebook_username)\r\n time.sleep(1)\r\n input_password.send_keys(self.facebook_password)\r\n time.sleep(1)\r\n btn_login.click()\r\n element = WebDriverWait(self.driver, 10).until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, self.CSS_SELECTOR_FRIEND))\r\n )", "def login():\n authorization_url, state = facebook.authorization_url(authorization_base_url)\n print 'Please authorize', authorization_url\n\n return 
redirect(authorization_url, code=302)" ]
[ "0.7017177", "0.6939956", "0.69250727", "0.68696064", "0.67818755", "0.6711868", "0.66746074", "0.6576655", "0.63557416", "0.633736", "0.62841153", "0.62753177", "0.617561", "0.6134787", "0.6076174", "0.602836", "0.60073495", "0.5994413", "0.5967894", "0.59287626", "0.59215635", "0.591363", "0.58835864", "0.5864437", "0.582476", "0.57591414", "0.57326746", "0.5710707", "0.5694853", "0.56599927" ]
0.7441132
0
Redirect the user to the openid provider
def openid_redirect(request):
    request.session['next'] = _get_next(request)
    request.session['openid_provider'] = request.GET.get('openid_provider')
    request.session['socialregistration_connect_object'] = get_object(request.GET)

    client = OpenID(
        request,
        'http%s://%s%s' % (
            _https(),
            Site.objects.get_current().domain,
            reverse('openid_callback')
        ),
        request.GET.get('openid_provider')
    )

    try:
        return client.get_redirect()
    except DiscoveryFailure:
        request.session['openid_error'] = True
        return HttpResponseRedirect(settings.LOGIN_URL)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def openid_done(request, provider=None):\n\n if not provider:\n provider = request.session.get('openid_provider', '')\n if hasattr(request,'openid') and request.openid:\n #check for already existing associations\n openid_key = str(request.openid)\n\n #authenticate and login\n try:\n user = authenticate(openid_key=openid_key, request=request, provider = provider)\n except:\n user = None\n\n if user:\n login(request, user)\n if 'openid_next' in request.session :\n openid_next = request.session.get('openid_next')\n if len(openid_next.strip()) > 0 :\n return HttpResponseRedirect(openid_next)\n return HttpResponseRedirect(LOGIN_REDIRECT_URL)\n # redirect_url = reverse('socialauth_editprofile')\n # return HttpResponseRedirect(redirect_url)\n else:\n\t return HttpResponseRedirect(LOGIN_URL)\n else:\n return HttpResponseRedirect(LOGIN_URL)", "def done(request, provider=None):\n if not provider:\n provider = request.session.get('openid_provider', '')\n if request.openid:\n #check for already existing associations\n identifier = str(request.openid)\n #authenticate and login\n user = authenticate(request=request, identifier=identifier, openid=request.openid, provider=provider)\n if user:\n login(request, user)\n referer = request.session.get('request_referer')\n next_url = str(getattr(settings, \"LOGIN_REDIRECT_URL\", \"/\"))\n if referer:\n next_url = urlparse.urljoin(referer, next_url)\n del request.session['request_referer']\n return HttpResponseRedirect(next_url)\n else:\n return HttpResponseRedirect(reverse(\"auth_login\"))\n else:\n return HttpResponseRedirect(reverse(\"auth_login\"))", "def start_oauth_view(request):\n url = get_oauth_url()\n return redirect(url)", "def test_login_openid_handle_redirection(self):\r\n response = self._send_bad_redirection_login()\r\n self.assertEquals(response.status_code, 302)", "def ask_openid(\n request,\n openid_url = None,\n next_url = None,\n on_failure=None\n ):\n on_failure = on_failure or signin_failure\n sreg_request = sreg.SRegRequest(optional=['nickname', 'email'])\n \n trust_root = getattr(\n settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'\n )\n if xri.identifierScheme(openid_url) == 'XRI' and getattr(\n settings, 'OPENID_DISALLOW_INAMES', False\n ):\n msg = _(\"i-names are not supported\")\n logging.debug('openid failed because i-names are not supported')\n return on_failure(request, msg)\n consumer = Consumer(request.session, util.DjangoOpenIDStore())\n try:\n auth_request = consumer.begin(openid_url)\n except DiscoveryFailure:\n msg = _(u\"OpenID %(openid_url)s is invalid\" % {'openid_url':openid_url})\n logging.debug(msg)\n return on_failure(request, msg)\n\n logging.debug('openid seemed to work')\n if sreg_request:\n logging.debug('adding sreg_request - wtf it is?')\n auth_request.addExtension(sreg_request)\n\n redirect_to = \"%s%s?%s\" % (\n get_url_host(request),\n reverse('user_complete_openid_signin'), \n urllib.urlencode({'next':next_url})\n )\n redirect_url = auth_request.redirectURL(trust_root, redirect_to)\n logging.debug('redirecting to %s' % redirect_url)\n return HttpResponseRedirect(redirect_url)", "def do_verify(self, request):\n\n # First, make sure that the user entered something\n openid_url = request['query'].get('openid_url')\n if not openid_url:\n return self.render(request, 'Enter an identity URL to verify.',\n css_class='error', form_contents=openid_url)\n\n oidconsumer = self.oidconsumer\n\n # Then, ask the library to begin the authorization.\n # Here we find out the identity server that will verify the\n # 
user's identity, and get a token that allows us to\n # communicate securely with the identity server.\n status, info = oidconsumer.beginAuth(openid_url)\n\n # If the URL was unusable (either because of network\n # conditions, a server error, or that the response returned\n # was not an OpenID identity page), the library will return\n # an error code. Let the user know that that URL is unusable.\n if status in [consumer.HTTP_FAILURE, consumer.PARSE_ERROR]:\n if status == consumer.HTTP_FAILURE:\n fmt = 'Failed to retrieve <q>%s</q>'\n else:\n fmt = 'Could not find OpenID information in <q>%s</q>'\n\n message = fmt % (cgi.escape(openid_url),)\n return self.render(request, message, css_class='error', form_contents=openid_url)\n elif status == consumer.SUCCESS:\n # The URL was a valid identity URL. Now we construct a URL\n # that will get us to process the server response. We will\n # need the token from the beginAuth call when processing\n # the response. A cookie or a session object could be used\n # to accomplish this, but for simplicity here we just add\n # it as a query parameter of the return-to URL.\n return_to = self.build_url(request, 'process', token=info.token)\n\n # Now ask the library for the URL to redirect the user to\n # his OpenID server. It is required for security that the\n # return_to URL must be under the specified trust_root. We\n # just use the base_url for this server as a trust root.\n redirect_url = oidconsumer.constructRedirect(\n info, return_to, trust_root=request['base_url'])\n\n # Send the redirect response\n return self.redirect(request, redirect_url)\n else:\n assert False, 'Not reached'", "def openid_login_complete(request,\r\n redirect_field_name=REDIRECT_FIELD_NAME,\r\n render_failure=None):\r\n\r\n render_failure = (render_failure or default_render_failure)\r\n\r\n openid_response = openid_views.parse_openid_response(request)\r\n if not openid_response:\r\n return render_failure(request,\r\n 'This is an OpenID relying party endpoint.')\r\n\r\n if openid_response.status == SUCCESS:\r\n external_id = openid_response.identity_url\r\n oid_backend = openid_auth.OpenIDBackend()\r\n details = oid_backend._extract_user_details(openid_response)\r\n\r\n log.debug('openid success, details=%s', details)\r\n\r\n url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)\r\n external_domain = \"{0}{1}\".format(OPENID_DOMAIN_PREFIX, url)\r\n fullname = '%s %s' % (details.get('first_name', ''),\r\n details.get('last_name', ''))\r\n\r\n return _external_login_or_signup(\r\n request,\r\n external_id,\r\n external_domain,\r\n details,\r\n details.get('email', ''),\r\n fullname\r\n )\r\n\r\n return render_failure(request, 'Openid failure')", "def _send_bad_redirection_login(self):\r\n user = UserFactory()\r\n\r\n factory = RequestFactory()\r\n post_params = {'email': user.email, 'password': 'password'}\r\n fake_url = 'fake url'\r\n request = factory.post(reverse('openid-provider-login'), post_params)\r\n openid_setup = {\r\n 'request': factory.request(),\r\n 'url': fake_url\r\n }\r\n request.session = {\r\n 'openid_setup': openid_setup\r\n }\r\n response = provider_login(request)\r\n return response", "def redirect_to_sso(self, next_url):\n resolved_url = resolve_url(self.sso_redirect_url)\n login_url_parts = list(urlparse(resolved_url))\n querystring = QueryDict(login_url_parts[4], mutable=True)\n querystring[settings.SSO_PROXY_REDIRECT_FIELD_NAME] = next_url\n login_url_parts[4] = querystring.urlencode(safe='/')\n\n return HttpResponseRedirect(urlunparse(login_url_parts))", "def 
sso_saml_login(request, idp_slug):\n login_url = request.saml2_auth.login()\n username = get_sso_username_from_session(request)\n if username:\n # verify that the stored user data actually the current IdP\n idp = IdentityProvider.get_active_identity_provider_by_username(username)\n if idp and idp.slug == idp_slug:\n # pre-populate username for Azure AD\n login_url = f'{login_url}&login_hint={username}'\n return HttpResponseRedirect(login_url)", "def esgf_oauth_callback(self):\n client = ESGFSLCSClient(self.request)\n if client.callback():\n # Redirect to the token view\n return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))\n else:\n # If we have not yet entered the OAuth flow, redirect to the start\n return HTTPFound(location=self.request.route_path('generate_esgf_slcs_token'))", "def login():\n return redirect(build_authorize_url())", "def signin(r):\n try:\n import urlparse as _urlparse\n from urllib import unquote\n except:\n import urllib.parse as _urlparse\n from urllib.parse import unquote\n next_url = r.get_full_path()\n r.session['login_next_url'] = next_url\n\n saml_client = _get_saml_client(get_current_domain(r))\n _, info = saml_client.prepare_for_authenticate()\n\n redirect_url = None\n\n for key, value in info['headers']:\n if key == 'Location':\n redirect_url = value\n break\n return HttpResponseRedirect(redirect_url)", "def petition(handler):\n # client_object = Clients()\n sound_cloud_client = Clients().sound_cloud_client()\n handler.redirect(sound_cloud_client.authorize_url())", "def authorize():\n token = oauth.tapkey.authorize_access_token()\n session['auth'] = token\n return redirect(url_for('owner_account_chooser'))", "def login(request):\n args = {\n 'client_id': settings.FACEBOOK_APP_ID,\n 'scope': settings.FACEBOOK_SCOPE,\n 'redirect_uri': request.build_absolute_uri(\\\n reverse('horizon.facebook.views.authentication_callback')\n )\n }\n return HttpResponseRedirect('https://www.facebook.com/dialog/oauth?' 
+ urllib.urlencode(args))", "def signin_success(request, identity_url, openid_response):\n\n logging.debug('')\n openid_data = util.from_openid_response(openid_response) #create janrain OpenID object\n request.session['openid'] = openid_data\n\n provider_name = util.get_openid_provider_name(openid_data.openid)\n user = authenticate(\n identifier = openid_data.openid,\n provider_name = provider_name,\n method = 'openid'\n )\n\n next_url = get_next_url(request)\n\n request.session['email'] = openid_data.sreg.get('email', '')\n request.session['username'] = openid_data.sreg.get('nickname', '')\n\n return finalize_generic_signin(\n request = request,\n user = user,\n user_identifier = openid_data.openid,\n login_provider_name = provider_name,\n redirect_url = next_url\n )", "def login():\n next_url = request.form.get(\"next\", None)\n\n if current_app.config[\"USE_SAML\"]:\n if next_url:\n return redirect(url_for(\"auth.saml\", sso2=next_url))\n return redirect(url_for(\"auth.saml\", sso=None))\n\n elif current_app.config[\"USE_LDAP\"]:\n return redirect(url_for(\"auth.ldap_login\", next=next_url))\n\n elif current_app.config[\"USE_LOCAL_AUTH\"]:\n return redirect(url_for(\"auth.local_login\", nex=next_url))\n\n return abort(404)", "def login_redirect(request):\n settings = request.ferlysettings\n qs = urllib.parse.urlencode([\n ('response_type', 'code'),\n ('client_id', settings.cognito_client_id),\n ('redirect_uri', get_cognito_redirect_uri(request)),\n ])\n\n url = 'https://%s/login?%s' % (settings.cognito_domain, qs)\n return HTTPSeeOther(url)", "def end_oauth_view(request):\n auth_code = request.GET[\"code\"]\n save_token(auth_code)\n url = reverse(\"admin:actionstep_accesstoken_changelist\")\n return redirect(url)", "def authorize(req, resp):\n api.redirect(resp, location=authorize_url())", "def redirect_to_overdrive_url():\n from local_settings import OD_API_CLIENT_KEY\n\n # od_url_clientid = OD_API_CLIENT_KEY\n # od_url_redirect_uri = \"http://localhost:5000/oauth_overdrive\"\n # od_url_accountId = '4425' \"\"\" Library id -- should be a passed parameter\"\"\"\n # od_url_state = 'turtlebutt'\n # od_url = 'https://oauth.overdrive.com/auth?'\n # od_url = od_url + 'clientid=' + od_url_clientid\n # od_url = od_url + '&redirect_uri=' + od_url_redirect_uri\n # od_url = od_url + '&scope=accountId:' + od_url_accountId\n # od_url = od_url + '&response_type=code&state=' + od_url_state \n\n # print \"Overdrive url = \", od_url, \"\\n\"\n \"\"\" This is the url to the OverDrive integration test system \n\n \"\"\"\n od_url = 'https://oauth.overdrive.com/auth?client_id=LORETTAPOWELL&redirect_uri=http%3A%2F%2F10.1.10.175%3A5000%2Foauth_overdrive&scope=accountId:4425&response_type=code&state=turtle'\n # od_url = 'https://berkeleyca.libraryreserve.com/10/50/en/SignIn.htm?URL=SignOutConfirm%2ehtm'\n return redirect(od_url)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def signin():\n scope = request.args.get(\n 'scope',\n 'identify')\n discord = make_session(scope=scope.split(' '))\n authorization_url, state = discord.authorization_url(AUTHORIZATION_BASE_URL)\n session['oauth2_state'] = state\n return redirect(authorization_url)", "def loginurl(request, response):\n from google.appengine.api import users as gusers\n urls = {}\n for p in openIdProviders:\n p_name = p.split('.')[-2]\n p_url = p.lower()\n try:\n url = 
gusers.create_login_url(federated_identity=p_url)\n if not url: url = create_openid_url(p_url)\n except TypeError: continue\n urls[p_name] = url\n return urls", "def login_success(user):\n return redirect('/')", "def oauth2callback(request):\n error = request.GET.get('error')\n if error:\n error_msg = request.GET.get('error_description', error)\n return HttpTextResponse(\n 'The authorization request failed: %s' % _safe_html(error_msg))\n else:\n user = request.user\n flow = _create_flow(request)\n credentials = flow.step2_exchange(request.GET)\n StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').put(credentials)\n redirect_uri = _parse_state_value(str(request.GET.get('state')),\n user)\n return HttpResponseRedirect(redirect_uri)", "def oauth2callback(request):\n error = request.GET.get('error')\n if error:\n error_msg = request.GET.get('error_description', error)\n return HttpTextResponse(\n 'The authorization request failed: %s' % _safe_html(error_msg))\n else:\n user = request.user\n flow = _create_flow(request)\n credentials = flow.step2_exchange(request.GET)\n StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').put(credentials)\n redirect_uri = _parse_state_value(str(request.GET.get('state')),\n user)\n return HttpResponseRedirect(redirect_uri)", "def test_open_id_setup(self):\r\n self.attempt_login(200)", "def redirect(url):" ]
[ "0.7310133", "0.69767207", "0.69368905", "0.67243433", "0.66221726", "0.6532379", "0.65151626", "0.639436", "0.63739187", "0.6283034", "0.62608516", "0.6209797", "0.6182559", "0.61617637", "0.61428356", "0.6127544", "0.61146307", "0.6099549", "0.6084637", "0.6081178", "0.6079942", "0.59975654", "0.59974575", "0.59820545", "0.5970255", "0.5968122", "0.5959851", "0.5959851", "0.5931354", "0.59190434" ]
0.83476585
0
Center protein so that the molecule is not broken by periodic boundary conditions
def center_protein(traj, inplace=True):
    create_bonds(traj.topology)
    return traj.image_molecules(inplace=inplace, make_whole=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def center(self, center_mass=False):\n if center_mass:\n com = self.center_of_mass\n self.xyz -= com\n else:\n self.xyz -= self.xyz.mean(0)", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def set_center(self,structure):\n for i,b in enumerate(self.bfs):\n b.set_center( structure[ self.LIST1[i] ] ) \n return", "def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])", "def center_of_bees(self):\n pos, com, success = self.perception\n return around(sum(pos)/(len(pos) + 1))", "def get_center_of_mass_allies(self,obs):", "def centerAxis():\n dislin.center()", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def find_center_of_mass(selection='(all)', state=-1):\n state = utils.int_to_state(state)\n model = cmd.get_model(selection, state=state)\n com = cpv.get_null()\n # iterate all atoms and add vectors of center of mass of each atoms\n for atom in model.atom:\n com = cpv.add(com, atom.coord)\n com = cpv.scale(com, 1.0 / len(model.atom))\n return com", "def center(self,c, ADDR):\r\n #FIGURE OUT HOW TO DO THIS\r\n #Actually pretty sure this is impossible to do from software\r\n returnValue('Success!')", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def center_of_mass_polyhedron():\n raise NotImplementedError", "def center_of_gravity(self):\n weights = [self.stabilizer_h.weight, self.stabilizer_vright.weight, self.stabilizer_vleft.weight]\n cgs = [self.stabilizer_h.center_of_gravity, self.stabilizer_vright.center_of_gravity,\n self.stabilizer_vleft.center_of_gravity]\n total_weight = sum(weights)\n cg_x = sum([weights[i] * cgs[i].x for i in range(0, len(weights))]) / total_weight\n cg_y = sum([weights[i] * cgs[i].y for i in range(0, len(weights))]) / total_weight\n cg_z = sum([weights[i] * cgs[i].z for i in range(0, len(weights))]) / total_weight\n\n return Point(cg_x, cg_y, cg_z)", "def to_center():\n center_msg = b'\\x02\\x35\\x35\\x03'\n #qpt.write(center_msg)\n #feedback = qpt.readline()\n move_to_position(0,0)\n #return feedback", "def center_of_mass(self, matrix):\n # Changing the positions of all objects relative to center of mass, in origo.\n x, y, z = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 1:4], axis=0)/(np.sum(matrix[:, 0], 
axis=0))\n print('Center of mass located at (%.4g, %.4g, %.4g)' %(x, y, z))\n # x-direction\n matrix[:, 1] = matrix[:, 1]-x\n # y-direction\n matrix[:, 2] = matrix[:, 2]-y\n # z-direction\n matrix[:, 3] = matrix[:, 3]-z\n # The Suns initial velocity which makes the total momentum of the system zero\n # velcity_sun = sum(mass_planet_i*veocity_planet_i)/(mass_sun)\n u, v, w = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 4:7], axis=0)/(matrix[0, 0])\n print('The initial velocity of the Sun (%.4g, %.4g, %.4g)' %(u, v, w))\n matrix[0, 4:7] = u, v, w\n # Returning the modified matrix\n return matrix", "def center_of_mass(molecule):\n xcom=ycom=zcom=0\n totm = 0\n for atom in get_atoms(molecule):\n m = get_mass(atom)\n x,y,z = get_xyz(atom)\n xcom += m*x\n ycom += m*y\n zcom += m*z\n totm += m\n xcom /= totm\n ycom /= totm\n zcom /= totm\n return xcom,ycom,zcom", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def phase_center(self):\n try:\n rx_number = extract_channel_number(self.title)\n ph_center = (_np.array(self.GPRI_tx_coord) + _np.array(\n getattr(self, \"GPRI_rx{num}_coord\".format(num=rx_number)))) / 2\n return ph_center\n except AttributeError:\n return 0", "def _centre(self, period):\n if self.direction():\n mx = self.data[-1]\n else:\n mx = self.data[0]\n\n return ((mx // period) * period).squeeze()", "def getcenter(self):\n return self.centro.cartesianas()", "def center_of_mass(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n com = props.CentreOfMass()\n return geom_utils.gp_to_numpy(com)", "def center_on_spawn(self):\n self.center_on(*self.world.metadata['playerStart'])", "def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.", "def setCenter(self, p):\n self.__center = p", "def center(self):\n return self._lower + 0.5 * (self._upper - self._lower)", "def center(self):\n return self.centralizer(self)", "def center(self, obj):\n return self.phy2abs.center(obj)", "def rotation_pivot_to_center(self):\n pass", "def _center(pos, shift):\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2" ]
[ "0.6612349", "0.65694755", "0.6376795", "0.63726264", "0.6337984", "0.6238661", "0.61947876", "0.619474", "0.61426926", "0.61275536", "0.6102843", "0.6085959", "0.60810447", "0.6072383", "0.60476613", "0.60178554", "0.6017457", "0.59851617", "0.59755105", "0.59694713", "0.5925088", "0.59119576", "0.58953804", "0.58908755", "0.5887605", "0.5881327", "0.58780944", "0.5864116", "0.58577913", "0.5844009" ]
0.75121605
0
Return the number of circular references to the object. For the purposes of this function, the circular reference must be only accessible (directly or indirectly) through the object.
def circular_reference_count(obj: typing.Any) -> int:
    if np is not None:
        result = _numpy_circular_ref_count(obj)
        if result is not NotImplemented:
            return result
    return _get_circular_ref_count(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_refs(self):\n return self._n_refs", "def referencecount(self) :\n\t\ttry :\n\t\t\treturn self._referencecount\n\t\texcept Exception as e:\n\t\t\traise e", "def get_size(obj: Any) -> int:\n if isinstance(obj, BLACKLIST):\n return 0\n seen_ids: set[int] = set()\n size = 0\n objects = [obj]\n while objects:\n need_referents = []\n for obj_ in objects:\n if not isinstance(obj_, BLACKLIST) and id(obj_) not in seen_ids:\n seen_ids.add(id(obj_))\n size += sys.getsizeof(obj_)\n need_referents.append(obj_)\n objects = gc.get_referents(*need_referents)\n return size", "def refCount(self, node):\n return self._references.get(node, 0)", "def nreferences(self):\n return self.__nreferences", "def associated_object_count(self):\n return self._associated_object_count", "def num_cochains(self) -> int:\n if self.__num_cochains__ is not None:\n return self.__num_cochains__\n return self.ptr.numel() + 1", "def num_global_external_references(self):\n if self.global_external_references:\n return len(self.global_external_references)\n else:\n return 0", "def cycles(self):\n return len(self.cyclic_form)", "def getNumReferents(self):\n return _libsbml.SBaseRef_getNumReferents(self)", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def size(self):\n\n size = 1\n traverse = self.front\n if self.front == None:\n return 0\n\n while traverse.next != None:\n traverse = traverse.next\n size += 1\n return size", "def size(self):\n\n size = 1\n traverse = self.front\n if self.front == None:\n return 0\n\n while traverse.next != None:\n traverse = traverse.next\n size += 1\n return size", "def size(self):\n count = 0\n current = self.front\n\n while current is not None:\n current = current.getPtr()\n count += 1\n\n return count", "def _get_objects_length(self) -> int:\n return len(self.objects)", "def size(self):\n traverse = self.head\n count = 0\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count + 1", "def size(self):\n traverse = self.head\n count = 1\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def __len__(self) -> int:\n return 1 + sum(len(child) for child in self.children)", "def complexity(self):\n n = 0\n ctx = self\n while ctx is not None:\n n += 1\n ctx = ctx.parent()\n return n", "def getrefcount(p_object): # real signature unknown; restored from __doc__\n return 0", "def length(self): # Class O(n)\r\n h = self.head\r\n size = 1\r\n while 'next' in dir(h.next):\r\n size += 1\r\n h = h.next\r\n return size", "def count_weaks():\n summary = dict()\n complete = list(StorableObject._weak_cache)\n for obj in complete:\n name = obj.base_cls_name\n summary[name] = summary.get(name, 0) + 1\n\n return summary", "def count_dependencies(self, stack):\n return self.__graph.in_degree(stack)", "def break_count(self):\n return len(self.link_ids) + len(self.crossring_cleavages)", "def nb_objects(self) -> int:\n return 0", "def refspec_count(self):\n\n return C.git_remote_refspec_count(self._remote)", "def __len__(self):\n return 1 + sum([len(child) for child in self.children])", "def count(self):\n return len(self.__links)", "def countRecursiveReferences(self) -> Tuple[Dict[str, List[str]], Dict[str, Dict[str, int]]]:\n countReferences = {}\n isReferencedBy = {}\n for nt in 
self.non_terminals: # for each non-terminal in the grammar\n for production in self.grammar[nt]: # for each possible production on that non terminal\n count = {}\n for option in production.split(): # iterate over the production's terms\n count.setdefault(option, 0)\n if option in self.non_terminals: # if the term is a non terminal\n count[option] += 1 # the number of times that option has been referenced increases\n isReferencedBy.setdefault(option, set())\n isReferencedBy[option].add(nt)\n \n for key in count:\n count.setdefault(key, 0)\n countReferences.setdefault(key, {})\n countReferences[key].setdefault(nt, 0)\n\n countReferences[key][nt] = max(\n countReferences[key][nt], count[key]) # the number of references of the non terminal is for this\n # term is the maximum between all productions in this non terminal\n\n return isReferencedBy, countReferences" ]
[ "0.7472081", "0.712911", "0.6964042", "0.6863925", "0.68219185", "0.678775", "0.6773094", "0.67412645", "0.6582645", "0.65278816", "0.64337957", "0.6420373", "0.6420373", "0.64150935", "0.6401749", "0.63989073", "0.6373764", "0.63708293", "0.6358727", "0.63216347", "0.63204354", "0.6294413", "0.6288663", "0.6243745", "0.6230707", "0.62283033", "0.6226508", "0.62160015", "0.6206884", "0.62062407" ]
0.85062957
0
Enable purging. Only with purging, pyweakref.ref instances will be weak references, not strong references.
def enable_purging() -> None:
    global _purge, _purge_timer
    if not purging():
        _purge = True
        _purge_timer = threading.Timer(5.0, _purge_func)
        _purge_timer.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release(self):\n del self.ref\n self.ref = None\n gc.collect()", "def _enable_weakref(self) -> None:\n self._tx_weakref_disabled = False", "def disable_purging() -> None:\r\n global _purge, _purge_timer\r\n if purging():\r\n _purge = False\r\n _purge_timer.cancel()", "def purge() -> None:\r\n _purge_func(False)", "def _disable_weakref(self) -> None:\n self._tx_weakref_disabled = True", "def purging() -> bool:\r\n return _purge", "def __del__(self):\r\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def purge(self):\n pass", "def __del__(self):\n if self.is_locked:\n self.release()", "def weak(self):\n return self", "def gc_disable():\n raise NotImplementedError()", "def release(self):\n self.acquired = False", "def remove_refs(self):\n\n self.reference = None\n self.url = None", "def gc_enable():\n raise NotImplementedError()", "def purge(self, **options):\n pass", "def __del__(self):\n \n pass", "def weakref_proxy(*args, **kwargs):\n\n pass", "def weakref_proxy(*args, **kwargs):\n\n pass", "def _remove_from_weakref(self, tx: BaseTransaction) -> None:\n if self._tx_weakref_disabled:\n return\n assert tx.hash is not None\n self._tx_weakref.pop(tx.hash, None)", "def __del__(self):\n return False", "def __del__(self):\n # weakref callbacks are rather low level, and working out how to use\n # them correctly requires a bit of head scratching. One must find\n # somewhere to store the weakref till after the referent is dead, and\n # without accidentally keeping the referent alive. Then one must\n # ensure that the callback frees the weakref (without leaving any\n # remnant ref-cycles).\n #\n # When it is an option, using a __del__ method is far less hassle.\n #\n # Source: https://bugs.python.org/issue15528\n self._finalize()", "def _purge():\r\n _cache.clear()", "def track_ref_for_deletion(self, ref):\n if ref not in self.__refs_for_deletion:\n self.__refs_for_deletion.append(ref)", "def __del__(self):\n\n if self._needs_release:\n send_message(self, \"release\", restype=objc_id, argtypes=[])" ]
[ "0.6430023", "0.63898516", "0.63580555", "0.6267979", "0.6240635", "0.62111545", "0.61647785", "0.61381817", "0.61381817", "0.61381817", "0.61381817", "0.61381817", "0.61381817", "0.611086", "0.6087001", "0.60550416", "0.60219187", "0.5966864", "0.5905727", "0.5900416", "0.58211887", "0.5815078", "0.57996035", "0.57996035", "0.5780808", "0.5766967", "0.5753906", "0.57455534", "0.57162404", "0.57049316" ]
0.65232295
0
Return all pyweakrefs to obj. If none, return an empty list.
def get_pyweakrefs(obj: typing.Any) -> list[ReferenceType]:
    seq = _reference_id_registry.get(id(obj), [])
    return [seq[0] for item in seq]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def list_refs(self):\n pass", "def _refs(self, items):\n # type: (Iterable[Any]) -> Iterable[weakref.ReferenceType]\n return map(self.ref, items)", "def list_all_refs(self):\n self.list_refs()\n self.list_ref0s()\n self.list_defect_refs()", "def refs(self):\n return self._refs", "def keyrefs(self):\n return [ref(key) for key in self.iterkeys()]", "def get_class_refs(self):\n return list(self._get_class_refs().values())", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def get_references(self):\n\n return self._refs", "def get_downstream_objects(obj):\n # gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n # seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr([obj], olist, seen)\n return olist", "def get_all_objects():\n gc.collect()\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def RawRefs(self, default=[{}]):\n tmp = self.data.get('raw_refs', default)\n return [HEP.RawReferenceObject(i) for i in tmp]", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)", "def function_refs(self) -> List[FunctionReference]:\n return self._function_refs", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def keyrefs(self):\n return list(self.data)", "def resolve_all_refs(s):\n refs = []\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # resolve collected refs\n for ref in refs:\n ref.resolve()\n\n return len( refs )", "def _get_all_tracked_objects(self):\n all = []\n for obj in gc.get_objects():\n if any([mod.is_module_object(obj) for mod in self.tracked_modules]):\n all.append(TrackedObject(obj))\n return all", "def hbObjects(self):\r\n return self.__hbObjs", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def get_crossrefs(self):\n return self._crossrefs", "def get_object_references(self, value):\n return set()", "def get_object (self) :\n\n # object is a weak_ref, and may have been garbage collected - we simply\n # return 'None' then\n return self._object ()", "def resolve_all_refs(s):\n for ref in list_of_all_unpointed_refs():\n ref.resolve()", "def get_refs(p):\n refs = []\n u = cPickle.Unpickler(cStringIO.StringIO(p))\n u.persistent_load = refs\n u.noload()\n u.noload()\n for ref 
in refs:\n if isinstance(ref, tuple):\n yield ref[0]\n elif isinstance(ref, str):\n yield ref\n else:\n assert isinstance(ref, list)\n yield ref[1][:2]", "def list_defect_refs(self):\n print('-----------\\nDEFECT_REFs\\n-----------')\n self._print_dict(self.defect_refs)", "def references(self):\n return self._get_related_resources(False)" ]
[ "0.69156635", "0.68716735", "0.6771153", "0.6550949", "0.6442704", "0.64174813", "0.6346928", "0.6336576", "0.6197461", "0.61949223", "0.6137897", "0.6096993", "0.60405004", "0.60172296", "0.5993775", "0.5944581", "0.5942475", "0.59189206", "0.5828131", "0.57902074", "0.57866716", "0.57688767", "0.57674515", "0.576673", "0.5736151", "0.57127863", "0.57102233", "0.5686917", "0.56822276", "0.5670329" ]
0.85791713
0
Return if purging is enabled. Without purging, pyweakref.ref instances will not be weak references, rather strong references.
def purging() -> bool:
    return _purge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_purging() -> None:\r\n global _purge, _purge_timer\r\n if not purging():\r\n _purge = True\r\n _purge_timer = threading.Timer(5.0, _purge_func)\r\n _purge_timer.start()", "def reuseable(self) -> bool:\n return self._reuseable", "def disable_purging() -> None:\r\n global _purge, _purge_timer\r\n if purging():\r\n _purge = False\r\n _purge_timer.cancel()", "def gc_enabled():\n raise NotImplementedError()", "def retain(self):\n return True", "def checkRefs(self, export_refs):\r\n return True", "def is_fresh(self):\n return not self.used", "def _enable_weakref(self) -> None:\n self._tx_weakref_disabled = False", "def __del__(self):\n return False", "def is_weak(self):\n return self.binding == 'STB_WEAK'", "def purge() -> None:\r\n _purge_func(False)", "def purge(self):\n if os.path.exists(self.lockfile):\n self.release()\n return True\n return False", "def _disable_weakref(self) -> None:\n self._tx_weakref_disabled = True", "def purge_protection_enabled(self) -> bool:\n return pulumi.get(self, \"purge_protection_enabled\")", "def should_check_refcount(self):\n raise NotImplementedError()", "def get_spooled(self):\r\n return True", "def weak(self):\n return self", "def optimized_for_frequent_attach(self) -> Optional[bool]:\n return pulumi.get(self, \"optimized_for_frequent_attach\")", "def is_all_free(self):\n return self.pool_size == self.pool.qsize()", "def reference_only(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"reference_only\")", "def gc_enable():\n raise NotImplementedError()", "def gethooverable(self):\n try:\n return self.hooverable\n except:\n return False", "def get_released(self):\n\n self.update()\n\n if self.released:\n self.released = False\n return True\n\n return False", "def supports_refcounts(self):\n return sys.implementation.name == \"cpython\"", "def is_cycle_suppressed(self):\n self.suppression_used = True\n return self._cyclesuppression is not None", "def watching(self):\n return self.get() in self._refs", "def is_free(self):\n return self._size > 0", "def get_weakness(self):\r\n return self.weakness", "def disable_referential_integrity(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disable_referential_integrity\")", "def objects_in_use(self):\n return set()" ]
[ "0.5966954", "0.59007514", "0.58261746", "0.57176876", "0.5714088", "0.5536531", "0.5514175", "0.5476762", "0.54592717", "0.5433333", "0.5414477", "0.53703785", "0.53387326", "0.531639", "0.52955073", "0.52908105", "0.52488065", "0.5241981", "0.5231054", "0.5223706", "0.52126664", "0.52010596", "0.5192512", "0.5181258", "0.51647544", "0.51510453", "0.5138412", "0.51309204", "0.5068187", "0.5068088" ]
0.6741145
0
Generates previews on the timeline
def generate_previews(source: BaseSource=None, force=False): entry_filters = {'extra_attributes__has_key': 'file'} if source: entry_filters['source'] = source.entry_source entries = Entry.objects.filter(**entry_filters) entry_count = len(entries) log_message = f"Generating previews for {entry_count} entries" if source: log_message += f' from {source}' if force: log_message += ', and overwriting existing previews' logger.info(log_message) missing_entry_count = 0 with transaction.atomic(): for index, entry in enumerate(entries): # Delete orphaned entries (for example if the backup gets deleted) if not Path(entry.extra_attributes['file']['path']).exists(): logger.error(f"Entry #{entry.id} does not exist at {entry.extra_attributes['file']['path']}") missing_entry_count += 1 entry.delete() continue logger.debug(f"Processing entry {index + 1}/{entry_count}" f" (#{entry.id} - {entry.extra_attributes['file']['path']})") if processing_tasks := _get_preview_processing_tasks(entry): logger.debug(f"Generating preview for {str(entry)} at {entry.extra_attributes['file']['path']}") for task in processing_tasks: try: task(entry, overwrite=force) except KeyboardInterrupt: raise except: logger.exception(f"Could not generate preview for entry #{entry.pk} " f"({ str(Path(entry.extra_attributes['file']['path'])) }).") entry.save() if missing_entry_count == 0: logger.info(f"Generated previews for {len(entries)} entries.") else: logger.warning(f"Generated previews for {len(entries)} entries, " f"{missing_entry_count} orphaned entries removed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPreviews(self):\n logger.debug(\"Func: getPreviews\")\n \n # return \"ASDFASDF\"\n return sorted(self._currentPreviewsDict.keys())", "def get_all_game_previews(self):\n state = 'preview.gameData.status.detailedState'\n return self._db.Games.find({'date' : self._day,\n state : 'Scheduled'})", "def renderPreviews(objects, imagePath):\n prefs = getPreferences()\n\n paddingFactor = 1 - (prefs.padding_value * 0.01)\n\n if objects:\n # Set all objects to be invisible for rendering and store their\n # previous state.\n renderState = setRenderable(bpy.data.objects, hidden=True)\n\n for obj in objects:\n # Select the current object that the camera can focus on it.\n if isinstance(obj, bpy.types.Collection):\n for mesh in collectionMeshes(obj):\n setObjectRenderable(mesh, True)\n else:\n setObjectRenderable(obj, True)\n\n # Frame the selection and adjust the focal length to allow\n # for a safe framing area.\n bpy.ops.view3d.camera_to_view_selected()\n cache.values[\"camera\"].data.lens = prefs.focal_value * paddingFactor\n\n # Render the preview.\n bpy.ops.render.render(write_still=True)\n\n # Rename the output file to match the object's name.\n tempFile = \".\".join((os.path.join(imagePath, IMAGE_NAME), \"png\"))\n thumbFile = \".\".join((os.path.join(imagePath, obj.name), \"png\"))\n os.rename(tempFile, thumbFile)\n\n # Reset the focal length of the camera.\n cache.values[\"camera\"].data.lens = prefs.focal_value\n\n # Deselect the object and hide it from rendering.\n if isinstance(obj, bpy.types.Collection):\n for mesh in collectionMeshes(obj):\n setObjectRenderable(mesh, False)\n else:\n setObjectRenderable(obj, False)\n\n # Reset the previous render state for all objects.\n resetRenderable(renderState)\n\n # Mark the selected objects as assets and assign their custom\n # preview image.\n # Usually copying the context shouldn't cause an error but just\n # for safety an error gets intercepted.\n try:\n override = bpy.context.copy()\n except TypeError:\n return ({'ERROR'}, (\"An error occurred while trying to access the context \" +\n \"for loading the custom preview\"))\n\n for obj in objects:\n if not obj.asset_data:\n obj.asset_mark()\n\n override['id'] = obj\n thumbFile = \".\".join((os.path.join(imagePath, obj.name), \"png\"))\n bpy.ops.ed.lib_id_load_custom_preview(override, filepath=thumbFile)", "def preview():\n return render_template(\"controls/preview.html\")", "def _place_previews(self, frame_dims):\n if self._previewcache.get(\"images\", None) is None:\n logger.debug(\"No images in cache. Returning None\")\n return None\n samples = self._previewcache[\"images\"].copy()\n num_images, thumbnail_size = samples.shape[:2]\n if self._previewcache[\"placeholder\"] is None:\n self._create_placeholder(thumbnail_size)\n\n logger.debug(\"num_images: %s, thumbnail_size: %s\", num_images, thumbnail_size)\n cols, rows = frame_dims[0] // thumbnail_size, frame_dims[1] // thumbnail_size\n logger.debug(\"cols: %s, rows: %s\", cols, rows)\n if cols == 0 or rows == 0:\n logger.debug(\"Cols or Rows is zero. No items to display\")\n return None\n remainder = (cols * rows) - num_images\n if remainder != 0:\n logger.debug(\"Padding sample display. 
Remainder: %s\", remainder)\n placeholder = np.concatenate([np.expand_dims(self._previewcache[\"placeholder\"],\n 0)] * remainder)\n samples = np.concatenate((samples, placeholder))\n\n display = np.vstack([np.hstack(samples[row * cols: (row + 1) * cols])\n for row in range(rows)])\n logger.debug(\"display shape: %s\", display.shape)\n return Image.fromarray(display)", "def set_preds_as_viewed(preds):\n predictions.set_preds_as_viewed(preds)", "def markov_story():\n return render_template(\"markovstory.html\")", "def studio_preview_view(self, context):\r\n fragment = Fragment()\r\n self.render_reorderable_children(context, fragment)\r\n return fragment", "def preview(request):\n ctx = {}\n \n ctx[\"area\"] = \"bookmarks\"\n ctx[\"preview_theme\"] = request.GET.get(\"t\", \"light\")\n ctx[\"bookmarks\"] = Bookmark.by_user(request.user)[:5]\n \n return TemplateResponse(request, \"users/preview.html\", ctx)", "def generate_explore_views(self):\n views = []\n if self._safety_surface[\"type\"] == \"circle\":\n # Generate points evently distributed on the circle\n center = self._safety_surface[\"center\"]\n center = Vector3r(center[0], center[1], center[2])\n x0 = center.x_val\n y0 = center.y_val\n z0 = center.z_val\n radius = self._safety_surface[\"radius\"]\n TOTAL_NUM = self._config[\"point_num\"]\n ROUND_NUM = self._config.get(\"round_num\", 1)\n delta_theta = 2 * math.pi / (TOTAL_NUM / ROUND_NUM)\n\n for i in range(TOTAL_NUM):\n theta = delta_theta * i\n x = x0 + radius * math.sin(theta)\n y = y0 + radius * math.cos(theta)\n pitch = -45\n views.append(\n {\n \"position\": Vector3r(x, y, z0),\n \"yaw\": -1 * (0.5 * math.pi + theta),\n \"pitch\": pitch,\n }\n )\n elif self._safety_surface[\"type\"] == \"cylinder\":\n # Generate points spiral the cylinder\n top_center = self._safety_surface[\"top_center\"]\n top_center = Vector3r(top_center[0], top_center[1], top_center[2])\n x0 = top_center.x_val\n y0 = top_center.y_val\n bottom = self._safety_surface.get(\"bottom\", 0)\n height = top_center.z_val - bottom\n radius = self._safety_surface[\"radius\"]\n TOTAL_NUM = self._config[\"point_num\"]\n ROUND_NUM = self._config.get(\"round_num\", 1)\n START_PITCH = self._config.get(\"start_pitch\", -45)\n END_PITCH = self._config.get(\"end_pitch\", 45)\n delta_theta = 2 * math.pi / (TOTAL_NUM / ROUND_NUM)\n delta_height = height / (TOTAL_NUM - 1)\n delta_pitch = (END_PITCH - START_PITCH) / TOTAL_NUM\n for i in range(TOTAL_NUM):\n theta = delta_theta * i\n x = x0 + radius * math.sin(theta)\n y = y0 + radius * math.cos(theta)\n z = bottom + i * delta_height\n pitch = START_PITCH + i * delta_pitch\n views.append(\n {\n \"position\": Vector3r(x, y, z),\n \"yaw\": -1 * (0.5 * math.pi + theta),\n \"pitch\": pitch / 180 * math.pi,\n }\n )\n else:\n print(\n \"OfflineNavigator: unknown type of safety_surface (%s)\"\n % self._safety_surface[\"type\"]\n )\n\n return views", "def updatePreviews(self):\n\tsession = requests.Session()\n\tsession.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0'})\n\tr = crack(session, session.get('https://www.whoscored.com'))\n\tprint r.content", "def get_team_game_previews(self, team, dates):\n abbr = convert_name(team, how='abbr')\n return self._db.Games.find({'date' : {'$in' : dates},\n '$or' : [{'home' : abbr},\n {'away' : abbr}]})", "def makeVideo():\n weekNumber = 11\n for _ in range(10):\n df = loadDbIntoDf2('trending')\n df_copy = df.copy()\n df_shorter = selectTop(df_copy,'week',weekNumber , 'trending')\n vid_dl = 
download(df_shorter,weekNumber)\n merge(vid_dl,weekNumber)\n weekNumber = weekNumber + 1", "def draw_tile_previews(screen, width, tile_previews, rect_list, selected=0):\n menu_background = (180, 180, 180) #Light Grey\n highlight_width = 3\n highlight_color = (0, 255, 0) #Bright Green\n block_color = (255, 255, 255, 170) #White with transparancy\n spacing = width/len(tile_previews)\n menu = pygame.draw.rect(screen, menu_background, [0, 0, width, 50])\n for index, tile in enumerate(tile_previews):\n r = screen.blit(tile, (index * spacing, 25))\n if index == selected:\n pygame.draw.rect(screen, highlight_color, r, highlight_width)\n else:\n # pylint: disable=c-extension-no-member\n # C extension error for gfxdraw\n pygame.gfxdraw.box(screen, r, block_color)\n rect_list.append(pygame.Rect.copy(menu))\n return rect_list", "def designTest(request):\n\n MAX_NEWS = 10\n start_id = '0'\n end_id = string.atoi(start_id) + 10\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"-date\")[start_id:end_id] # Sort by date ... and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n # Vypocet prvniho ID z predchozi skupiny novinek (jedna skupina = MAX_NEWS) \n start_id_num = string.atoi(start_id)\n if (start_id_num + MAX_NEWS) < news_count:\n preview_start_id = start_id_num + MAX_NEWS\n else:\n preview_start_id = start_id_num\n\n # Vypocet prvniho ID z nasledujici skupiny novinek (jedna skupina = MAX_NEWS) \n next_start_id = start_id_num - MAX_NEWS # prvni ID nasledujicich novinek\n if next_start_id < 0:\n next_start_id = 0;\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek, 'start_id': start_id,\n 'preview_start_id': preview_start_id, 'next_start_id': next_start_id}\n return render(request, 'designTest/news_design_test.html', context)", "def general_timeline():\n return render_template('timeline.html', general=True, show_username=True)", "def preview(self,*args,**kwargs):\n self.cam.start_preview(*args,**kwargs)", "def videodetail(request, hash_key):\n # get video\n video_queryset = Video.objects.all().select_related('owner')\n video = get_object_or_404(video_queryset, hash_key=hash_key)\n \n clips = video.clips.all()\n \n # Are we reviewing the latest clip?\n start_on_last_clip = request.GET.get('latest', '')\n initialClipIndex = (len(clips) - 1) if start_on_last_clip else 0\n \n return render_to_response('video/details.html',\n {'video':video,\n 'clips':clips,\n 'initialClipIndex': initialClipIndex},\n context_instance=RequestContext(request))", "def show_visualizations(self, number = -1):\n instance = self.instance\n if number > instance.view_num:\n print(\"In function show_visualizations: Error, input number greater than the view numbers.\")\n return Page()\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n G = myGraph(instance.view_num)\n for i in range(instance.view_num):\n view = instance.tables[instance.views[i].table_pos].views[instance.views[i].view_pos]\n G.addNode(view)\n G.getSim()\n result = G.getTopK(instance.view_num)\n if number != -1:\n begin = number - 1\n end = number\n else:\n begin = 0\n end = instance.view_num\n page = Page()\n for order in range(begin, end):\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n view = G.nodes[result[order]]\n else:\n view = instance.tables[instance.views[order].table_pos].views[instance.views[order].view_pos]\n data = {}\n 
data['order'] = order\n data['chartname'] = instance.table_name\n data['describe'] = view.table.describe\n data['x_name'] = view.fx.name\n data['y_name'] = view.fy.name\n data['chart'] = Chart.chart[view.chart]\n data['classify'] = [v[0] for v in view.table.classes]\n data['x_data'] = view.X\n data['y_data'] = view.Y\n data['title_top'] = 5\n \n # 以下代码与html_handle相似\n margin = str(data['title_top']) + '%'\n \n if data['chart'] == 'bar':\n chart = (Bar().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart'] == 'pie': \n chart = (Pie().set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin)))\n elif data['chart'] == 'line': \n chart = (Line().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart']== 'scatter': \n chart = (Scatter().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(type_='value', name=data['x_name'], splitline_opts=opts.SplitLineOpts(is_show=True)),\n yaxis_opts=opts.AxisOpts(type_='value', name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n else :\n print (\"not valid chart\")\n \n if not data[\"classify\"] :\n attr = data[\"x_data\"][0]\n val = data[\"y_data\"][0]\n if data['chart'] == 'bar': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n if isinstance(attr[0], str):\n attr = [x for x in attr if x != '']\n attr = list(map(float, attr))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n else :\n attr = data[\"x_data\"][0]\n for i in range(len(data[\"classify\"])) :\n val = data[\"y_data\"][i]\n name = (data[\"classify\"][i][0] if type(data[\"classify\"][i]) == type(('a','b')) else data[\"classify\"][i])\n if i == 0:\n if data['chart'] != 'pie' and data['chart'] != 'scatter':\n chart.add_xaxis(attr)\n if data['chart'] == 'bar': \n chart.add_yaxis(name, val, stack=\"stack1\", label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n attr_scatter = data[\"x_data\"][i]\n if isinstance(attr_scatter[0], str):\n attr_scatter = [x for x in attr_scatter if x != '']\n attr_scatter = list(map(float, attr_scatter))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = 
list(map(float, val))\n chart.add_xaxis(attr_scatter).add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n return page", "def charts(self,req):\n self.player.overviewing=True", "def construct_timeline(self, timexList, docFeatList):\n if not docFeatList:\n return timexList \n \n \n self.timeReferences = self.create_time_references(docFeatList, timexList)\n \n self.timexImpactZones = self.create_timex_impact_zone(timexList)\n \n timexList = self.evaluate_all_relative_timexes(timexList, docFeatList)\n \n# (expDate, expConf) = self.estimate_exposure_date(self.timeReferences, timexList)\n ##: expDate is obtained based on the first time. \n ##: Update time reference and re-estimate exposure time\n \n \n ##: Update time references after some features obtain their time from time impact zones\n self.timeReferences = self.update_time_references_with_impact_zones(docFeatList, timexList)\n \n (expDate, expConf) = self.estimate_exposure_date(self.timeReferences, timexList)\n \n if expDate:\n self.exposureDate = expDate\n self.exposureDateConfidence = expConf \n self.timeReferences[('Vaccination', None, None, None, None, None, expConf)] = self.exposureDate\n self.timeReferences[('Injection', None, None, None, None, None, expConf)] = self.exposureDate\n \n (onsetDate, onsetConf) = self.estimate_onset_date(docFeatList)\n if onsetDate:\n self.onsetDate = onsetDate\n self.onsetDateConfidence = onsetConf \n \n ##: Final scan for all features without assigned date time \n for feat in docFeatList:\n if self.sentence_tags[feat.getSentNum()]!='NORMAL': continue\n if not feat.getTlink() or not feat.getTlink().getDateTime():\n ##: feautures in clause should not be assigned a time. They should have been given a time somewhere else\n if feat.inClause():\n feat = self.assign_feature_time_with_references(feat, self.timeReferences, feat.getStartPos())\n ##: TLink could still be None if no reference is found. 
Then use the time from time impact zones\n if feat.getTlink():\n continue\n \n if feat.getType()=='DRUG' and 'concomitant' in [tg[0] for tg in self.sentence_full_tags[feat.getSentNum()]]:\n feat = self.assign_time_to_concomitant_drug(feat, docFeatList)\n if feat.getTlink():\n continue\n \n if not self.timexImpactZones or feat.getStartPos() < self.timexImpactZones[0][0]: ##: feature locates before any time zones\n ##: Assignment on features in the begining for VAERS\n if self.reportType == 'vaers': \n feat = self.assign_feature_time_with_references(feat, self.timeReferences) \n continue\n \n feat = self.assign_feature_time_with_impact_zones(feat, self.timexImpactZones)\n \n return timexList", "def create_preview(message):", "def update_storyline(self):\n search_title = self.video_title.replace(\" \", \"+\")\n api_response = urllib.urlopen(\"https://api.themoviedb.org/3/search/\"+\n \"movie?api_key=0bf3188c8eacc47d41ae97\"+\n \"d1d6dd2cfc&query=\"+\n search_title)\n movie_data = api_response.read()\n api_response.close()\n if '\"overview\":\"' in movie_data:\n #finding first position of storyline from API data\n apos = movie_data.find('\"overview\":\"') + 12\n #finding last position of storyline\n bpos = movie_data.find('\",', apos)\n return movie_data[apos:bpos]\n else:\n return", "def pylda_vis(args, model, corpus, time_slices, pre):\n print(timestamp() + \" About to visualize...\", file=sys.stderr)\n for slice in range(len(time_slices)):\n doc_topic, topic_term, doc_lengths, term_frequency, vocab = model.dtm_vis(time=slice, corpus=corpus)\n vis_wrapper = pyLDAvis.prepare(topic_term_dists=topic_term,\n doc_topic_dists=doc_topic,\n doc_lengths=doc_lengths,\n vocab=vocab,\n term_frequency=term_frequency,\n sort_topics=True)\n pyLDAvis.save_html(vis_wrapper, pre + \"time_slice_\" + str(slice) + \".html\")\n print(timestamp() + \" Prepared time slice\", slice, \"for pyLDAvis...\", file=sys.stderr)", "def draw_pose(preds, img):\n humans = preds['predictions']\n for human in humans:\n pose_lines = human['pose_lines']\n for i, _ in enumerate(pose_lines):\n line = pose_lines[i]['line']\n cv2.line(img, (line[0], line[1]), (line[2], line[3]), COCO_COLORS[i], 3)", "def test_preview_post(self):\n pass", "def preview():\r\n html = create_html_report()\r\n return html", "def run(self):\n print('Running test of the markups in different views')\n\n #\n # first load the data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n print(\"Getting MR Head Volume\")\n mrHeadVolume = sampleDataLogic.downloadMRHead()\n\n #\n # link the viewers\n #\n sliceLogic = slicer.app.layoutManager().sliceWidget('Red').sliceLogic()\n compositeNode = sliceLogic.GetSliceCompositeNode()\n compositeNode.SetLinkedControl(1)\n\n #\n # MR Head in the background\n #\n sliceLogic.StartSliceCompositeNodeInteraction(1)\n compositeNode.SetBackgroundVolumeID(mrHeadVolume.GetID())\n sliceLogic.EndSliceCompositeNodeInteraction()\n\n #\n # switch to conventional layout\n #\n lm = slicer.app.layoutManager()\n lm.setLayout(2)\n\n # create a fiducial list\n displayNode = slicer.vtkMRMLMarkupsDisplayNode()\n slicer.mrmlScene.AddNode(displayNode)\n fidNode = slicer.vtkMRMLMarkupsFiducialNode()\n slicer.mrmlScene.AddNode(fidNode)\n fidNode.SetAndObserveDisplayNodeID(displayNode.GetID())\n\n # make it active\n selectionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSelectionNodeSingleton\")\n if (selectionNode is not None):\n selectionNode.SetReferenceActivePlaceNodeID(fidNode.GetID())\n\n # add some known points to it\n eye1 = 
[33.4975, 79.4042, -10.2143]\n eye2 = [-31.283, 80.9652, -16.2143]\n nose = [4.61944, 114.526, -33.2143]\n index = fidNode.AddFiducialFromArray(eye1)\n fidNode.SetNthFiducialLabel(index, \"eye-1\")\n index = fidNode.AddFiducialFromArray(eye2)\n fidNode.SetNthFiducialLabel(index, \"eye-2\")\n # hide the second eye as a test of visibility flags\n fidNode.SetNthFiducialVisibility(index, 0)\n index = fidNode.AddFiducialFromArray(nose)\n fidNode.SetNthFiducialLabel(index, \"nose\")\n\n self.logicDelayDisplay(\"Placed 3 fiducials\")\n\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # switch to 2 3D views layout\n #\n lm.setLayout(15)\n self.logicDelayDisplay(\"Switched to 2 3D views\")\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 2\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode2\")\n self.logicDelayDisplay(\"Showing only in view 2\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # remove it so show in all\n #\n displayNode.RemoveAllViewNodeIDs()\n self.logicDelayDisplay(\"Showing in both views\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 1\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode1\")\n self.logicDelayDisplay(\"Showing only in view 1\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # switch back to conventional\n lm.setLayout(2)\n self.logicDelayDisplay(\"Switched back to conventional layout\")\n # self.printViewAndSliceNodes()\n\n # test of the visibility in slice views\n displayNode.RemoveAllViewNodeIDs()\n\n # jump to the last fiducial\n slicer.modules.markups.logic().JumpSlicesToNthPointInMarkup(fidNode.GetID(), index, 1)\n # refocus the 3D cameras as well\n slicer.modules.markups.logic().FocusCamerasOnNthPointInMarkup(fidNode.GetID(), index)\n\n # show only in red\n displayNode.AddViewNodeID('vtkMRMLSliceNodeRed')\n self.logicDelayDisplay(\"Show only in red slice\")\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed on red slice\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # remove all, add green\n # print 'before remove all, after added red'\n # self.printViewNodeIDs(displayNode)\n displayNode.RemoveAllViewNodeIDs()\n # 
print 'after removed all'\n # self.printViewNodeIDs(displayNode)\n displayNode.AddViewNodeID('vtkMRMLSliceNodeGreen')\n self.logicDelayDisplay('Show only in green slice')\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 0 or self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed only on green slice\")\n print '\\tred = ',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed')\n print '\\tgreen =',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen')\n self.printViewNodeIDs(displayNode)\n return False\n\n return True", "def view(self):", "def CreatePresentation(self, event):\n pass" ]
[ "0.5931078", "0.552589", "0.5501078", "0.5446661", "0.53483385", "0.5338679", "0.53334534", "0.53294927", "0.5276309", "0.5274671", "0.52258205", "0.52163804", "0.5206252", "0.52037615", "0.5201259", "0.51994044", "0.5165678", "0.5152966", "0.510705", "0.5101222", "0.5090006", "0.50698924", "0.50683725", "0.50550133", "0.5039516", "0.5030147", "0.5029471", "0.5009873", "0.49990058", "0.49841642" ]
0.6162525
0
Get the full extent of an axes, including axes labels, tick labels, and titles.
def full_extent(ax, pad=0.0): # For text objects, we need to draw the figure first, otherwise the extents # are undefined. ax.figure.canvas.draw() items = ax.get_xticklabels() + ax.get_yticklabels() # items += [ax, ax.title, ax.xaxis.label, ax.yaxis.label] items += [ax, ax.title] bbox = Bbox.union([item.get_window_extent() for item in items]) return bbox.expanded(1.0 + pad, 1.0 + pad)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extent(self):\n return self._ax.extent", "def full_extent(ax, pad=0.0):\n # For text objects, we need to draw the figure first, otherwise the extents are undefined.\n ax.figure.canvas.draw()\n items = ax.get_xticklabels() + ax.get_yticklabels()\n items += [ax, ax.title]\n bbox = Bbox.union([item.get_window_extent() for item in items])\n return bbox.expanded(pad + 1, pad + 1)", "def extent(self):\n return np.array(self._extent)", "def _getAxesExtent(\n self,\n x0: float,\n y0: float,\n x1: float,\n y1: float,\n enabledAxes: Optional[EnabledAxes] = None,\n ) -> AxesExtent:\n if enabledAxes is None:\n enabledAxes = self.enabledAxes\n\n y2_0, y2_1 = y0, y1\n left, top, width, height = self.plot.getPlotBoundsInPixels()\n\n if not all(enabledAxes) and not self.plot.isKeepDataAspectRatio():\n # Handle axes disabled for zoom if plot is not keeping aspec ratio\n if not enabledAxes.xaxis:\n x0, x1 = left, left + width\n if not enabledAxes.yaxis:\n y0, y1 = top, top + height\n if not enabledAxes.y2axis:\n y2_0, y2_1 = top, top + height\n\n if self.plot.isKeepDataAspectRatio() and height != 0 and width != 0:\n ratio = width / height\n xextent, yextent = math.fabs(x1 - x0), math.fabs(y1 - y0)\n if xextent != 0 and yextent != 0:\n if xextent / yextent > ratio:\n areaHeight = xextent / ratio\n center = 0.5 * (y0 + y1)\n y0 = center - numpy.sign(y1 - y0) * 0.5 * areaHeight\n y1 = center + numpy.sign(y1 - y0) * 0.5 * areaHeight\n else:\n areaWidth = yextent * ratio\n center = 0.5 * (x0 + x1)\n x0 = center - numpy.sign(x1 - x0) * 0.5 * areaWidth\n x1 = center + numpy.sign(x1 - x0) * 0.5 * areaWidth\n\n # Convert to data space\n x0, y0 = self.plot.pixelToData(x0, y0, check=False)\n x1, y1 = self.plot.pixelToData(x1, y1, check=False)\n y2_0 = self.plot.pixelToData(None, y2_0, axis=\"right\", check=False)[1]\n y2_1 = self.plot.pixelToData(None, y2_1, axis=\"right\", check=False)[1]\n\n return AxesExtent(\n min(x0, x1),\n max(x0, x1),\n min(y0, y1),\n max(y0, y1),\n min(y2_0, y2_1),\n max(y2_0, y2_1),\n )", "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def extent(self):\n ulx, uly, lrx, lry = self.ul_lr\n return ulx, lry, lrx, uly", "def extent(self):\n\n x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0\n ypoint = [0, self.ny] if self.origin == 'lower-left' else [self.ny, 0]\n y = np.array(ypoint) * self.dy + self.corner_grid.y0\n\n return [x[0], x[1], y[0], y[1]]", "def extent(self):\n return self._extent", "def get_extent(self):\n pass", "def extent(self):\n rx0 = gxapi.float_ref()\n ry0 = gxapi.float_ref()\n rz0 = gxapi.float_ref()\n rx1 = gxapi.float_ref()\n ry1 = gxapi.float_ref()\n rz1 = gxapi.float_ref()\n self.gxvox.get_area(rx0, ry0, rz0, rx1, ry1, rz1)\n if self.is_depth:\n return gxgm.Point2(((rx0.value, ry0.value, -rz1.value), (rx1.value, ry1.value, -rz0.value)))\n return gxgm.Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)),\n self.coordinate_system)", "def _get_extent_axes(self, x):\n if not hasattr(self, 'get_subplotspec'):\n return [self]\n y = ('y' if x == 'x' else 'x')\n idx = (0 if x == 'x' else 1)\n argfunc = (np.argmax if x == 'x' else np.argmin)\n irange = self._range_gridspec(x)\n axs = [ax for ax in self.figure._axes_main\n if ax._range_gridspec(x) == irange]\n if not axs:\n return [self]\n else:\n pax = axs.pop(argfunc([ax._range_gridspec(y)[idx] for ax in axs]))\n return [pax, 
*axs]", "def getExtent(self):\n extent = self.parent.biomeGeometry.extent\n return extent", "def get_data_extent(self):\n xs, ys = self.xs, self.ys\n xmin, xmax = min(xs), max(xs)\n ymin, ymax = min(xy), max(ys)\n w = maxx - minx\n h = maxy - miny\n return xmin, ymax, w, h", "def get_extent(self):\n geot = self.geotransform()\n return (geot[0], geot[3] + self.YSize() * geot[5],\n geot[0] + self.XSize() * geot[1], geot[3])", "def get_data_extent(self):\n x, y = self.xy[0], self.xy[1]\n w, h = self.width, self.height\n return x, y, w, h", "def extent(self):\n return self.index.max() - self.index.min(), self.columns.max() - self.columns.min()", "def getCurrentExtent(self):\n if not self.currentBox:\n extent = None\n else:\n extent = boxToExtent(self.currentBox)\n return extent", "def get_axes_pixelsize(self):\n bbox = self.axes.get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n width *= self.figure.dpi\n height *= self.figure.dpi\n return width, height", "def full_extent(self):\n log.debug(\"Set full extent\")\n self.auto_scale = False\n local_plot = self.main_curve_dialog.get_plot()\n local_plot.set_axis_limits(0, 0, 4096)\n local_plot.replot()", "def geoextent(self):\r\n return self.series_extent", "def get_extent_from_dataset(ds):\n\n transform = ds.GetGeoTransform()\n rows = ds.RasterYSize\n cols = ds.RasterXSize\n\n west = transform[0]\n ewres = transform[1]\n north = transform[3]\n nsres = transform[5]\n\n south = north + (rows * nsres)\n east = west + (cols * ewres)\n\n # print(\"crs\", ds.GetProjection())\n # print(\"cols\", cols)\n # print(\"north\", north)\n # print(\"south\", south)\n # print(\"west\", west)\n # print(\"east\", east)\n # print(\"ewres\", ewres)\n # print(\"nsres\", nsres)\n\n extent = SpatialExtent(top=north, bottom=south, left=west,\n right=east, width=abs(ewres), height=abs(nsres))\n\n return extent", "def axes(self):\n return self._axes", "def axes(self):\n return self._axes", "def get_extent(ds):\n\n #\n # Check if latitude and longitude are stored as coordinates.\n #\n if 'lon' in ds.coords and 'lat' in ds.coords:\n return BoundingBox(\n left=ds.lon.values.min(),\n bottom=ds.lat.values.min(),\n right=ds.lon.values.max(),\n top=ds.lat.values.max()\n )\n\n #\n # Otherwise, get extent from projection information\n # by projecting the corner coordinates onto EPSG:4326\n # to obtain the latitude and longitude at the four corners.\n #\n src_crs = get_crs(ds)\n if src_crs is None:\n raise CRSError('Could not determine the CRS.')\n\n dst_crs = CRS(init='epsg:4326')\n proj_bounds = get_bounds(ds)\n bounds = rasterio.warp.transform_bounds(\n src_crs, dst_crs, **proj_bounds._asdict()\n )\n return BoundingBox(*bounds)", "def extents(self):\n x0, y0, width, height = self._rect_bbox\n xmin, xmax = sorted([x0, x0 + width])\n ymin, ymax = sorted([y0, y0 + height])\n return xmin, xmax, ymin, ymax", "def extent(self):\r\n if not hasattr(self, '_extent'):\r\n self._extent = conf.lib.clang_getCursorExtent(self)\r\n\r\n return self._extent", "def get_original_position(self, axes, renderer):\n if self._locator is None:\n bbox = axes.get_position(original=True)\n else:\n bbox = self._locator(axes, renderer)\n return bbox", "def get_data_extent(self):\n \n x, y = self.xy[0], self.xy[1]\n w, h = self.radius, self.radius\n return x-w, y+w, w, h", "def axes(self) -> np.ndarray: # array[Axes]\n return self._axes", "def extents(self):\n if self.direction == 'horizontal':\n vmin = self._selection_artist.get_x()\n vmax = vmin 
+ self._selection_artist.get_width()\n else:\n vmin = self._selection_artist.get_y()\n vmax = vmin + self._selection_artist.get_height()\n return vmin, vmax" ]
[ "0.75531536", "0.7368489", "0.6839166", "0.67600006", "0.67201394", "0.6709703", "0.66875136", "0.6674248", "0.6548036", "0.6335185", "0.63227665", "0.62621903", "0.625633", "0.6246917", "0.6236677", "0.62210065", "0.6219811", "0.61830145", "0.61672103", "0.6164999", "0.59673846", "0.5967135", "0.5967135", "0.59379244", "0.59362435", "0.59178257", "0.59157705", "0.5883945", "0.58698577", "0.5824236" ]
0.73843247
1
Set up the ringbuffer datastream, and return the channel object so that we can flush it when we want to. This datastream logs LOTS of data, but isn't written out unless explicitly flushed (a flight recorder).
def setup_ringbuffer_datastream(context): chan = context.create_channel("ring", 5000000, 50, 0, ringbuffer=True) enabled = { "SCHED" : True, "SIGNAL_FAM" : True, "SYSCALL" : True, "TASKALIAS": True, "DSKITRACE": True } ring_ds = context.create_datastream("ds_ring", "ring") ds.process_enabled_dict(enabled) return chan
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBufferedData(self):\n if not self.ringBuffer: # first time when buffer is empty\n return np.zeros((1, self.windowLength, self.sensorChannels)) \n return np.array(self.ringBuffer)", "def scribe_buffer():\r\n if LogOptions._SCRIBE_BUFFER is None:\r\n LogOptions._SCRIBE_BUFFER = app.get_options().twitter_common_log_scribe_buffer\r\n return LogOptions._SCRIBE_BUFFER", "def buffer_stream(stream, batch_size):\n return pescador.buffer_streamer(stream, batch_size)", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):\n buff.put(in_data)\n return None, pyaudio.paContinue", "def _start_streaming_ram_to_host(self):\n self.regs.SDRAM_HOST_READ_GO = 1\n self.regs.CSTREAM_CFG = 1", "def __init__(\n self, channels: int, sampling_rate: int, buffer_size: int\n ) -> None:\n if sd is None:\n raise sd_error\n\n if channels <= 0:\n raise ValueError('channels must be positive.')\n if sampling_rate <= 0:\n raise ValueError('sampling_rate must be positive.')\n if buffer_size <= 0:\n raise ValueError('buffer_size must be positive.')\n\n self._audio_buffer = []\n self._buffer_size = buffer_size\n self._channels = channels\n self._sampling_rate = sampling_rate\n\n # Create a ring buffer to store the input audio.\n self._buffer = np.zeros([buffer_size, channels], dtype=float)\n self._lock = threading.Lock()\n\n def audio_callback(data, *_):\n \"\"\"A callback to receive recorded audio data from sounddevice.\"\"\"\n self._lock.acquire()\n shift = len(data)\n if shift > buffer_size:\n self._buffer = np.copy(data[:buffer_size])\n else:\n self._buffer = np.roll(self._buffer, -shift, axis=0)\n self._buffer[-shift:, :] = np.copy(data)\n self._lock.release()\n\n # Create an input stream to continuously capture the audio data.\n self._stream = sd.InputStream(\n channels=channels,\n samplerate=sampling_rate,\n callback=audio_callback,\n )", "def __init__(self, chunk=512, audio_format=pyaudio.paInt16, channels=1, rate=44100):\n self.CHUNK = chunk\n self.FORMAT = audio_format\n self.CHANNELS = channels\n self.RATE = rate\n self.recorder = pyaudio.PyAudio()\n self.stream = None\n self.streaming_mode = False\n #self.QUEUE_LENGTH = 100000\n #self.stream_queue = Queue(self.QUEUE_LENGTH)\n self.stream_queue = Queue()", "def set_scribe_buffer(buffer_enabled):\r\n LogOptions._SCRIBE_BUFFER = buffer_enabled", "def start_callback_stream(self, callback, done):\n def stream_callback(in_data, frame_count, time_info, status):\n t = time_info['output_buffer_dac_time'] # get current time\n ts = np.arange(frame_count) / self.sample_rate + t # array of timesteps\n y = np.array([callback(t) for t in ts]) # compute output at each t\n y *= self.volume # scale output by volume\n return y.astype(np.float32).tobytes(), done(t) # return output buffer and done flag\n pa = pyaudio.PyAudio()\n stream = pa.open(rate=self.sample_rate, channels=1, format=pyaudio.paFloat32, output=1,\n frames_per_buffer=self.buffer_size, stream_callback=stream_callback)\n stream.start_stream()\n while stream.is_active():\n time.sleep(0.1)\n stream.stop_stream()\n stream.close()\n pa.terminate()", "def setup_traced_datastream(context, fams=[]):\n\tchan = context.create_channel(\"traced\", 500000, 30, 0, mmap=True)\n\tenabled = {\n\t\t\"SCHED\" : True,\n\t\t\"SIGNAL_FAM\" : True,\n\t\t\"SYSCALL\" : True,\n\t\t\"TASKALIAS\": True,\n\t\t\"DSKITRACE\": True\n\t}\n\n\tfor fam in fams:\n\t\tenabled[fam] = True\n\n\tfilters = [\n\t\t(\"task\", { \n\t\t\t\"tasks\" : [\n\t\t\t\t(\"trace-me-dskid\", 
{\"response\" : \"ACCEPT\"})\n\t\t\t],\n\t\t\t\"default_response\" : \"REJECT\"\t\t\n\t\t})\n\t]\t\n\n\tds = context.create_datastream(\"ds_traced\", \"traced\")\n\tds.process_enabled_dict(enabled)\n\tds.process_filter_list(filters)", "def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def _start_device(self):\r\n enabled = [1,1,1,0]\r\n self._data = [np.empty(self._samples,dtype=np.int16) for i in range(3)]\r\n self._data_buffer = [x.ctypes for x in self._data]\r\n self._timebase = self.get_timebase(self._sampling_time)\r\n self.v_rangeAPI = [7,7,7,0] # 5V range\r\n self.v_range = [CHANNEL_RANGE[i][\"rangeV\"] for i in self.v_rangeAPI]\r\n with self._driver_lock:\r\n for i,v,en in zip(range(4),self.v_rangeAPI,enabled): # three active channels\r\n m = self._lib.ps2000aSetChannel(self._handle,\r\n c_int32(i), # channel\r\n c_int16(en), # enabled\r\n c_int32(1), # DC coupling\r\n c_int32(v), # voltage range (API value)\r\n c_float(0)) # 0V offset\r\n check_result(m)\r\n\r\n if en:\r\n m = self._lib.ps2000aSetDataBuffer(self._handle,\r\n c_int32(i), # channel\r\n self._data_buffer[i],\r\n c_int32(self._samples),\r\n c_uint32(0), # segment index\r\n c_int32(0)) # ratio mode\r\n check_result(m)\r\n\r\n threshold_v = 3\r\n threshold_adc = int(threshold_v * MAX_EXT / self.v_range[2])\r\n m = self._lib.ps2000aSetSimpleTrigger(self._handle,\r\n c_int16(1), # enabled\r\n c_int32(2), # Trigger off Channel C\r\n c_int16(threshold_adc),\r\n c_int32(2), # direction = rising\r\n c_uint32(0), # no delay\r\n c_int16(2000)) # autotrigger after 2 seconds if no trigger occurs\r\n check_result(m)\r\n\r\n # Send AWG Info to Picoscope\r\n delta_phase = c_uint32()\r\n output_freq = 1/self._sampling_duration\r\n # output_freq = 1E6\r\n m = self._lib.ps2000aSigGenFrequencyToPhase(self._handle,\r\n c_double(output_freq),\r\n c_int32(0),\r\n c_uint32(len(self._waveform)),\r\n byref(delta_phase))\r\n check_result(m)\r\n delta_phase = int(delta_phase.value)\r\n offset_voltage = 1\r\n pk2pk = 2\r\n # output_freq = 1E6\r\n # wave_type = {'sine':0,'square':1,'triangle':2,'DC':3,\r\n # 'rising sawtooth':4,'falling sawtooth':5,'sin(x)/x':6,\r\n # 'Gaussian':7,'half-sine':8}\r\n waveformPtr = self._waveform.ctypes\r\n trigger_type = 2 # siggen gate high\r\n trigger_source = 4 # software trigger\r\n m = self._lib.ps2000aSetSigGenArbitrary(self._handle,\r\n c_int32(int(offset_voltage*1E6)), \r\n c_uint32(int(pk2pk*1E6)),\r\n c_uint32(delta_phase), # start delta phase\r\n c_uint32(delta_phase), # stop delta phase\r\n c_uint32(0), # delta phase increment\r\n c_uint32(0), # dwell count\r\n waveformPtr, # arbitrary waveform\r\n c_int32(self._samples), # arbitrary waveform size\r\n c_int32(0), # sweep type for delta phase\r\n c_int32(0), # extra operations\r\n c_int32(0), # index mode\r\n c_uint32(1), # shots\r\n c_uint32(0), # sweeps\r\n c_int32(trigger_type),\r\n c_int32(trigger_source),\r\n c_int16(0)) # extIn threshold\r\n check_result(m)\r\n # m = self._lib.ps2000aSetSigGenBuiltIn(self._handle,\r\n # c_int32(int(offset_voltage*1E6)), # offset voltage\r\n # c_uint32(int(pk2pk*1E6)),# peak to peak voltage\r\n # c_int32(wave_type['square']), # wave type\r\n # c_float(output_freq), # start frequency\r\n # c_float(output_freq), # stop frequency\r\n # c_float(0), # increment\r\n # c_float(0), # dwell count\r\n # c_int32(0), # sweep type\r\n # c_int32(0), # operation\r\n # c_uint32(4), # shots\r\n # c_uint32(0), # sweeps\r\n # c_int32(trigger_type), 
\r\n # c_int32(trigger_source),\r\n # c_int16(0)) # extIn threshold\r\n # check_result(m)\r\n\r\n # for i in enabled:\r\n # if i:\r\n # m = self._lib.ps2000aSetDataBuffer(self._handle,\r\n # c_int32(i), # channel\r\n # self._data_buffer[i],\r\n # c_int32(self._samples),\r\n # c_uint32(0), # segment index\r\n # c_int32(0)) # ratio mode\r\n # check_result(m)\r\n\r\n self._save_thread = Thread(target=self.save,args=(self._save_queue,))\r\n self._save_thread.daemon = True\r\n self._save_thread.start()\r\n\r\n self._process_thread = Thread(target=self.process,args=(self._process_queue,self._save_queue))\r\n self._process_thread.daemon = True\r\n self._process_thread.start()\r\n\r\n self._collect_thread = Thread(target=self.run_loop,args=(self._process_queue,))\r\n self._collect_thread.daemon = True\r\n self._collect_thread.start()\r\n\r\n return True", "def acquisition_init(self):\n assert self._inlet is not None, \"Connect call is required.\"\n metadata = self._inlet.info()\n log.debug(metadata.as_xml())\n for marker_inlet in self._marker_inlets:\n log.debug(\"Streaming from marker inlet: %s\",\n inlet_name(marker_inlet))\n\n info_channels = self._read_channels(metadata)\n info_fs = metadata.nominal_srate()\n\n # If channels are not initially provided, set them from the metadata.\n # Otherwise, confirm that provided channels match metadata, or meta is\n # empty.\n if not self.channels:\n self.channels = info_channels\n assert self.channels, \"Channels must be provided\"\n else:\n if info_channels and self.channels != info_channels:\n raise Exception(\"Channels read from the device do not match \"\n \"the provided parameters\")\n assert len(self.channels) == (metadata.channel_count() +\n len(self._appended_channels) +\n len(self._marker_inlets)),\\\n \"Channel count error\"\n\n if not self.fs:\n self.fs = info_fs\n elif self.fs != info_fs:\n raise Exception(\"Sample frequency read from device does not match \"\n \"the provided parameter\")", "def initialize(self, config: DataConsumerConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.INPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = int(self.batch_size / 2) + 5\n chn.sock_opts['sndhwm'] = 5", "def flush(self):\n \n # Buffer management\n # If data buffer not empty, send a set of values\n if self._data_buffer != []:\n time, data = self._data_buffer[0]\n self._log.debug(\"Server \" + \n self._settings['domain'] + self._settings['path'] + \n \" -> send data: \" + str(data) + \n \", timestamp: \" + str(time))\n if self._send_data(data, time):\n # In case of success, delete sample set from buffer\n del self._data_buffer[0]\n # If buffer size reaches maximum, trash oldest values\n # TODO: optionnal write to file instead of losing data\n size = len(self._data_buffer)\n if size > 1000:\n self._data_buffer = self._data_buffer[size - 1000:]", "def __init__(self, array_path: FilePath, buffer_time: float):\r\n # Load array from `array_path`\r\n try:\r\n loaded_data: np.ndarray = np.load(array_path)\r\n except FileNotFoundError as _:\r\n raise StreamException(f\"Stream file {array_path} not found!\")\r\n\r\n self._signal_data: np.ndarray\r\n self._time_data: np.ndarray\r\n # Recording stores time as well as data\r\n if np.shape(loaded_data)[0] == 2:\r\n self._signal_data = loaded_data[0, :]\r\n self._time_data = loaded_data[1, :]\r\n # Recording only stores data\r\n else:\r\n self._signal_data = loaded_data\r\n self._time_data = np.linspace(\r\n 0,\r\n 
len(self._signal_data) / ARRAY_UNIT_SIZE,\r\n len(self._signal_data),\r\n )\r\n\r\n # Chunk size and sample rate\r\n super().__init__(int(ARRAY_UNIT_SIZE * buffer_time), ARRAY_UNIT_SIZE)\r\n # Index of stream\r\n self._pointer: int = 0\r\n\r\n # Restart flag\r\n self._restart_flag: bool = False", "def initialize(self, config: DataProviderConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.OUTPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = 5\n chn.sock_opts['sndhwm'] = int(self.batch_size / 2) + 5", "def start_recording(self) -> None:\n # Clear the internal ring buffer.\n self._buffer.fill(0)\n\n # Start recording using sounddevice's InputStream.\n self._stream.start()", "def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n frames = in_data\n self._data_frame.append(frames)\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t", "def create_stream(self):\n pass", "def whenWriteReady(self, channel, call):", "def stream_channel(self, path, chunk_size=1000000, num_samples=None, verbose=False):\n def gen(path, chunk_size):\n dataset = self.file[path]\n if num_samples != None:\n length = num_samples\n else:\n length = len(dataset)\n offset = 0\n while offset < length:\n if length - offset > chunk_size:\n chunk = chunk_size\n else:\n chunk = length - offset\n verbose and print(\"processing new batch {0} of {1} ({2:.2%})\".format(\n offset, length, offset/length))\n buffer = dataset[offset:offset+chunk]\n yield buffer\n offset += chunk\n return Stream(gen(path, chunk_size), chunk_size=chunk_size)", "def open(self):\n if self.__stream is None:\n self.__open() # instantiate stream object\n self.__stream.start_stream() # reactivate collecting samples", "def initBuffer(self, env):\n cnt = 0\n while len(self.memory) < self.memory.capacity:\n cnt += 1\n print(\"\\rWarmup Buffer [{:d}]\".format(cnt), end=\"\")\n s = env.reset()\n actionIdx, actionIdxTuple = self.select_action(s, explore=True)\n s_, r, done, info = env.step(actionIdxTuple)\n self.store_transition(s, actionIdx, r, s_, info)\n print(\"\\n => Warmup Buffer Ends\")", "def _fill_buffer(self, in_data, *args, **kwargs):\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def _init_streams(self) -> None:\n assert self._is_root\n assert torch.cuda.is_available()\n # Stream for all-gathering parameters.\n self._streams[\"all_gather\"] = torch.cuda.Stream()\n # Stream for overlapping grad reduction with the backward pass.\n self._streams[\"post_backward\"] = torch.cuda.Stream()\n # Stream for pre-all-gather copies (e.g. 
H2D or precision cast).\n self._streams[\"pre_all_gather\"] = torch.cuda.Stream()", "def _fill_buffer(self, in_data, *args, **kwargs):\r\n self._buff.put(in_data)\r\n return None, pyaudio.paContinue", "def read_and_display_data(hat, samples_per_channel, num_channels): \n \n # Read all of the available samples (up to the size of the read_buffer which\n # is specified by the user_buffer_size). Since the read_request_size is set\n # to -1 (READ_ALL_AVAILABLE), this function returns immediately with\n # whatever samples are available (up to user_buffer_size) and the timeout\n # parameter is ignored.\n total_samples_read = 0\n read_request_size = READ_ALL_AVAILABLE\n completeFlag = 0 \n timeout = 5.0\n \n # file switch: w = Write to a file\n # file switch: w+ = Write to a file, if it doesn't exist create it\n # file switch: a = Append to a file\n # file switch: a+ = Append to a file, if is doesn't exist create it.\n # file switch: x = will create a file, returns an error if the file exist\n \n\n # If the scan starts, create a file name based upon current date and time.\n # Retrieve the Current Working Directory and generate the full path \n # to where to write the collected data as a .csv file. Open the file \n # begin writing the data to the file. When done, close the file.\n \n try:\n if os.path.exists(basepath):\n if not (os.path.exists(mypath)):\n os.mkdir(mypath)\n else:\n os.mkdir(basepath)\n os.chdir(basepath)\n os.mkdir(mypath)\n except OSError as exc:\n raise\n \n os.chdir(mypath)\n fileDateTime = datetime.strftime(datetime.now(), \"(%m_%d_%Y)-(%H-%M-%S)\")\n #filePath = mypath + \"/\" + DAQ_NAME + \"_\" + fileName + \".csv\"\n filePath = mypath + \"/\" + DAQ_NAME + \"_\" + fileDateTime + \".csv\"\n csvfile = open(filePath, \"w+\")\n csvwriter = csv.writer(csvfile) \n \n # Recording LED\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(RECORDING_LED, GPIO.OUT, initial=GPIO.LOW)\n GPIO.output(RECORDING_LED,GPIO.HIGH)\n \n while total_samples_read < samples_per_channel:\n read_result = hat.a_in_scan_read(read_request_size, timeout)\n\n # Check for an overrun error\n if read_result.hardware_overrun:\n print('\\n\\nHardware overrun\\n')\n break\n elif read_result.buffer_overrun:\n print('\\n\\nBuffer overrun\\n')\n break\n elif not (read_result.running and completeFlag == 0):\n completeFlag = 1\n print('\\n (2) Recording Completed - Buffer Draining')\n\n samples_read_per_channel = int(len(read_result.data) / num_channels)\n total_samples_read += samples_read_per_channel\n \n totalSamples = len(read_result.data) \n\n if samples_read_per_channel > 0:\n index = samples_read_per_channel * num_channels - num_channels\n \n new_index = 0\n myArray=[] #create an empty array\n for i in range(0, totalSamples, num_channels):\n myArray.append([]) #add a row to the array (COLUMN)\n for j in range(num_channels):\n\t\t\t\t\t#append a num_channels of data to the array (ROW)\n myArray[new_index].append(read_result.data[i + j]) \n new_index+=1\n\n csvwriter.writerows(myArray) #Write the array to file\n csvfile.flush\n\n # Cleanup\n csvfile.close() \n print('\\n (3) Buffer Drained - Data Saved to CSV File\\n')\n GPIO.cleanup()\n GPIO.setmode(GPIO.BCM)\n \n # Complete LED\n GPIO.setup(COMPLETE_LED, GPIO.OUT, initial=GPIO.LOW)\n GPIO.output(COMPLETE_LED,GPIO.HIGH)\n time.sleep(5)\n GPIO.cleanup()\n hat.a_in_scan_cleanup()\n global CMD_RECEIVED\n CMD_RECEIVED = 1\n \n # Restarts script to prepare for another recording\n main()" ]
[ "0.5606128", "0.5467135", "0.5409699", "0.5336863", "0.5334489", "0.53252643", "0.53099316", "0.52994365", "0.528977", "0.52294934", "0.5184993", "0.5103268", "0.51002824", "0.5098576", "0.5091853", "0.5083243", "0.5059984", "0.5043554", "0.50412124", "0.5015153", "0.50136906", "0.5008746", "0.50085723", "0.50072354", "0.4986857", "0.4986655", "0.49833086", "0.49816456", "0.4978819", "0.49698764" ]
0.7082423
0
import_types is used to parse and prepare the type rules that will be used in calculating the types of expression trees.
def import_types(self, typerule_list, variable_types = []): # For simplicity, variable types are treated exactly the same as type rules all_type_rules = variable_types + typerule_list # Sort all type rules by their input lengths into the _type_rules dict for type_rule in all_type_rules: self._type_rules[len(type_rule[0])].append(TypeRule(type_rule[0], type_rule[1])) # Add wildcard types as lowest priority for cleanup self._type_rules[1].append(TypeRule(['?'], '?')) self._type_rules[3].append(TypeRule(['(', '?', ')'], '?'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _imports(graph: mapry.Graph, py: mapry.Py) -> str:\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n stdlib_block = {'import typing'}\n\n third_party_block = set() # type: Set[str]\n\n if mapry.needs_type(a_type=graph, query=mapry.Path):\n if py.path_as == 'str':\n pass\n elif py.path_as == \"pathlib.Path\":\n stdlib_block.add(\"import pathlib\")\n else:\n raise NotImplementedError(\n \"Unhandled path_as: {!r}\".format(py.path_as))\n\n if mapry.needs_type(a_type=graph, query=mapry.TimeZone):\n if py.timezone_as == 'str':\n pass\n\n elif py.timezone_as == 'pytz.timezone':\n third_party_block.update(\n ('import pytz', 'import pytz.exceptions # type: ignore'))\n\n else:\n raise NotImplementedError(\n 'Unhandled timezone_as: {}'.format(py.timezone_as))\n\n # yapf: disable\n if any(mapry.needs_type(a_type=graph, query=query)\n for query in\n (mapry.Date, mapry.Time, mapry.Datetime, mapry.Duration)):\n # yapf: enable\n stdlib_block.add('import datetime')\n\n if mapry.needs_type(a_type=graph, query=mapry.Map):\n stdlib_block.add(\"import collections\")\n\n if len(graph.classes) > 0:\n stdlib_block.add(\n 'import collections'\n ) # needed for the initialization of class registries\n\n ##\n # Needs regex?\n ##\n\n import_re = False\n for a_type, _ in mapry.iterate_over_types(graph=graph):\n if isinstance(a_type, (mapry.String, mapry.Path)) and a_type.pattern:\n import_re = True\n break\n\n if isinstance(a_type, mapry.Duration):\n import_re = True\n break\n\n for cls in graph.classes.values():\n if cls.id_pattern is not None:\n import_re = True\n break\n\n if import_re:\n stdlib_block.add(\"import re\")\n\n ##\n # First party\n ##\n\n first_party_block = {\n 'import {}'.format(py.module_name),\n 'import {}.parse'.format(py.module_name)\n }\n\n block_strs = [] # type: List[str]\n if len(stdlib_block) > 0:\n block_strs.append('\\n'.join(sorted(stdlib_block)))\n\n if len(third_party_block) > 0:\n block_strs.append('\\n'.join(sorted(third_party_block)))\n\n if len(first_party_block) > 0:\n block_strs.append('\\n'.join(sorted(first_party_block)))\n\n return '\\n\\n'.join(block_strs)", "def _build_imports(tree: dict) -> None:\n def _apply(item: dict) -> None:\n if item[\"type\"] == \"module\":\n item[\"imports\"] = get_imports(item[\"path\"])\n apply_tree(tree, _apply)", "def get_type_defs_required_import_records(self) -> list[ImportRecord]:\n if not self.typed_dicts:\n return []\n\n import_records: set[ImportRecord] = set()\n import_records.add(TypeTypedDict.get_typing_import_record())\n for typed_dict in self.typed_dicts:\n if typed_dict.replace_with_dict:\n import_records.add(Type.Any.get_import_record())\n import_records.add(Type.Dict.get_import_record())\n\n for type_annotation in typed_dict.get_children_types():\n import_record = type_annotation.get_import_record()\n if not import_record or import_record.is_builtins():\n continue\n if import_record.is_type_defs():\n continue\n import_records.add(\n import_record.get_external(self.get_module_name(self.service_name))\n )\n\n self.add_fallback_import_record(import_records)\n return sorted(import_records)", "def get_imported_types(ast: ast_pb2.AST,\n include_paths: List[str]) -> Set[str]:\n result = set()\n includes = set(ast.usertype_includes)\n for include in includes:\n if include.endswith('_clif.h'):\n clif_uses = _get_clif_uses(include, include_paths)\n for clif_use in clif_uses:\n result.add(clif_use.cpp_name)\n return result", "def special_import(self, form):\n if len(form) == 2:\n return 
ast.Import([(form[1].name, None)])\n else:\n r = ast.From(form[1].name, [(x.name, None) for x in form[2:]], -1)\n return r", "def ImportParsers(cls, import_dir):\n sys.path.append(import_dir)\n cls.elf_parser = importlib.import_module(\n \"vts.utils.python.library.elf_parser\")\n cls.vtable_parser = importlib.import_module(\n \"vts.utils.python.library.vtable_parser\")", "def resolve_imports(self):\n\n if not self.koocer:\n # @import resolution disabled\n return\n\n for tl in self.ast.body:\n if not isinstance(tl, knodes.KcImport):\n continue\n\n # load, preprocess, parse:\n kc = self.koocer(tl.file_fullpath)\n kc.parse()\n\n sub_ast = kc.ast\n\n # pass basic visitors\n sub_linkchecks = LinkChecks(self.koocer)\n sub_linkchecks.register()\n sub_linkchecks.run(sub_ast)\n\n sub_class_builder = ClassBuilder()\n sub_class_builder.register()\n sub_class_builder.run(sub_ast)\n\n # merge sub_ast informations\n\n # merge kooc types\n new_ktypes = ChainMap(self.ast.ktypes, sub_ast.ktypes)\n self.ast.ktypes = new_ktypes\n\n # merge C types\n new_types = ChainMap(self.ast.types, sub_ast.types)\n self.ast.types = new_types\n\n # merge C top declarations\n new_c_top_decl = ChainMap(self.ast.c_top_decl, sub_ast.c_top_decl)\n self.ast.c_top_decl = new_c_top_decl", "def get_types_import_string(cls):\n type_str = ', '.join(cls.types)\n return f\"from typing import {type_str}\"", "def _parse_import_list(self):\n imports = []\n brackets = False\n continue_kw = [\",\", \";\", \"\\n\", ')'] \\\n + list(set(keyword.kwlist) - set(['as']))\n while True:\n defunct = False\n token_type, tok = self.next()\n if tok == '(': # python allows only one `(` in the statement.\n brackets = True\n token_type, tok = self.next()\n if brackets and tok == '\\n':\n self.next()\n i, token_type, tok = self._parse_dot_name(self._current)\n if not i:\n defunct = True\n name2 = None\n if tok == 'as':\n name2, token_type, tok = self._parse_dot_name()\n imports.append((i, name2, defunct))\n while tok not in continue_kw:\n token_type, tok = self.next()\n if not (tok == \",\" or brackets and tok == '\\n'):\n break\n return imports", "def add_types(self, types, type_headers):\n data_to_type_csvs(self, types, type_headers)\n import_type_csvs(self, type_headers)", "def prepare_imports(self, extend):\n tmp = re.match(r'.+/(.+).js', extend)\n if tmp:\n return self.imports(what=tmp.group(1), wherefrom=extend)\n raise GenerateError('Can not extract imports from {}'.format(extend))", "def create_importfrom(module, names, level=0, line=0, column=0):\n importfrom = ast.ImportFrom()\n importfrom.level = level\n importfrom.module = module\n\n if data_structures_copy.is_iterable(names):\n importfrom.names = names\n else:\n importfrom.names = [names]\n\n importfrom.lineno = line\n importfrom.col_offset = column\n\n return importfrom", "def cython_import_tuples(self, t, seen=None):\n t = self.canon(t)\n if seen is None:\n seen = set()\n if isinstance(t, basestring):\n if t in self.base_types:\n seen.update(self.cython_pyimports[t])\n seen -= set((None, (None,)))\n return seen\n # must be tuple below this line\n tlen = len(t)\n if 2 == tlen:\n if self.isrefinement(t[1]) and t[1][0] in self.cython_pyimports:\n f = self.cython_pyimports[t[1][0]]\n if callable(f):\n f(t[1], self, seen)\n seen.update(self.cython_pyimports.get(t[0], (None,)))\n seen.update(self.cython_pyimports.get(t[1], (None,)))\n seen -= set((None, (None,)))\n return self.cython_import_tuples(t[0], seen)\n elif 3 <= tlen:\n assert t[0] in self.template_types\n 
seen.update(self.cython_pyimports[t[0]])\n for x in t[1:-1]:\n if isinstance(x, Number):\n continue\n elif isinstance(x, basestring) and x not in self.cython_cimports:\n continue\n self.cython_import_tuples(x, seen)\n seen -= set((None, (None,)))\n return seen", "def visit_Import(self, node: ast.Import) -> None:\n self.imports.append(node)\n\n # absolute imports - ignore indentation and just get all module names\n for name in node.names:\n self.modules.append(name.name)", "def register_imports(column_type, isOptional):\n used_validator_imports[column_type] = True\n if isOptional:\n used_validator_imports[\"OPTIONAL\"] = True", "def get_import_function_types(imports: Iterable[TExtern]) -> Tuple[FunctionType, ...]:\n return tuple(item for item in imports if isinstance(item, FunctionType))", "def enaml_importer():\n print(imports, dir(imports))\n old = imports.get_importers()\n\n yield imports\n\n imports._imports__importers = old", "def _serialize_type(import_type_dict: Dict[str, Set[Optional[str]]], delimiter: str) -> str:\n import_list = []\n for package_name in sorted(list(import_type_dict.keys())):\n module_list = import_type_dict[package_name]\n import_list.append(_serialize_package(package_name, module_list, delimiter))\n return delimiter.join(import_list)", "def _analyse_stmt_ImportFrom(\n self, statement: ast.ImportFrom, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def get_import_table_types(imports: Iterable[TExtern]) -> Tuple[TableType, ...]:\n return tuple(item for item in imports if isinstance(item, TableType))", "def visit_ImportFrom(self, node: Any): # noqa: N802\n # print(\"import from:\", node, dir(node))\n for alias in node.names:\n self.nodes[\"imports_from\"][node.module].append(alias.name)\n self.generic_visit(node)", "def get_typing_import_record() -> ImportRecord:\n return Type.TypedDict.get_import_record()", "def get_literals_required_import_records(self) -> list[ImportRecord]:\n import_records: set[ImportRecord] = set()\n import_records.add(TypeLiteral.get_typing_import_record())\n self.add_fallback_import_record(import_records)\n return sorted(import_records)", "def extractTypesFromString(types):\n\tftypes = types.split(\",\")\n\tdtypes = dict()\n\tcvalues = dict()\n\tfor ftype in ftypes:\n\t\titems = ftype.split(\":\") \n\t\tcindex = int(items[0])\n\t\tdtype = items[1]\n\t\tdtypes[cindex] = dtype\n\t\tif len(items) == 3:\n\t\t\tsitems = items[2].split()\n\t\t\tcvalues[cindex] = sitems\n\treturn (dtypes, cvalues)", "def injectTypes (g):\n\tself=__module__\n\ts=g.symbols\n\tg.token('TYPE_VAR', '_|[A-Z][A-Z0-9]*')\n\tg.rule('TypeParameter', s.LSB, listOf(g.agroup(s.TYPE_VAR, s.FQNAME), s.COMMA, g), s.RSB)\n\tg.rule('TypeReference', s.FQNAME._as('name'), s.TypeParameter.optional()._as('parameters'))\n\tg.group('TypeValue')\n\tg.rule('TypeExpression')\n\tg.rule('TypeUnionSuffix', s.PIPE, s.TypeValue)\n\tg.group('TypePrefix', s.TypeReference)\n\tg.group('TypeSuffix', s.TypeUnionSuffix)\n\tg.rule('TypeExpression', s.TypePrefix, s.TypeSuffix.zeroOrMore())\n\tg.rule('TypeParens', s.LP, listOf(s.TypeExpression, s.COMMA, g), s.RP)\n\ts.TypeValue.set(s.TypeParens, s.TypeExpression)\n\tg.rule('TypeSlot', s.CheckIndent, g.aword('@slot'), s.NAME._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'))\n\tg.group('TypeLine', s.TypeSlot)\n\tg.group('TypeCode', s.COMMENT, s.TypeLine)\n\tg.rule('TypeBody', s.Indent, s.TypeCode.zeroOrMore(), 
s.Dedent)\n\tg.rule('Type', s.CheckIndent, g.aword('@type'), s.TypeReference._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'), s.TypeBody.optional())", "def _get_types(self):\n\n db = Database()\n self.c_built_ins = list(map(lambda tup: tup[0], db.select_built_types()))\n self.c_built_in_array_types = r'^(' + '|'.join(self.escaped(self.c_built_ins)) + ')\\[[0-9]*\\]'\n self.c_types = list(map(lambda tup: tup[0], db.select_types()))\n self.c_array_types = r'^(' + '|'.join(self.escaped(self.c_types)) + ')\\[[0-9]*\\]'\n db.close_connection()", "def generate_from(ast: ast_pb2.AST,\n include_paths: List[str]) -> Generator[str, None, None]:\n includes = set(ast.usertype_includes)\n\n for include in includes:\n # Not generating type casters for the builtin types.\n # Not scanning headers generated by pybind11 code generator because the\n # `// CLIF USE` in those headers do not have associated `Clif_PyObjFrom` or\n # `Clif_PyObjAs`.\n if (include.startswith('clif/python') or\n # Excluding absl::Status and absl::StatusOr\n include.startswith('util/task/python')):\n continue\n clif_uses = _get_clif_uses(include, include_paths)\n for clif_use in clif_uses:\n yield from _generate_type_caster(clif_use.py_name, clif_use.cpp_name,\n clif_use.generate_load,\n clif_use.generate_cast)", "def import_and_add(self, import_str):\n # loaded_classes.clear()\n\n try:\n import_module(import_str)\n except ImportError as e:\n traceback.print_exc()\n logger.warning(\"Tried to import `%s` and failed, ignoring\", import_str)\n logger.warning(\"Error: %s\", e)\n # else:\n # for k in loaded_classes:\n # if k.__module__.startswith(\"dataclay\"):\n # # dataClay contrib classes should not be registered here\n # continue\n # else:\n # self.add_class(k)", "def _scan_bytecode(\n self, module, module_code_object, is_scanning_imports):\n level = None\n fromlist = None\n\n # 'deque' is a list-like container with fast appends, pops on\n # either end, and automatically discarding elements too much.\n prev_insts = deque(maxlen=2)\n for inst in util.iterate_instructions(module_code_object):\n if not inst:\n continue\n # If this is an import statement originating from this module,\n # parse this import.\n #\n # Note that the related \"IMPORT_FROM\" opcode need *NOT* be parsed.\n # \"IMPORT_NAME\" suffices. For further details, see\n # http://probablyprogramming.com/2008/04/14/python-import_name\n if inst.opname == 'IMPORT_NAME':\n # If this method is ignoring import statements, skip to the\n # next opcode.\n if not is_scanning_imports:\n continue\n\n assert prev_insts[-2].opname == 'LOAD_CONST'\n assert prev_insts[-1].opname == 'LOAD_CONST'\n\n # Python >=2.5: LOAD_CONST flags, LOAD_CONST names, IMPORT_NAME name\n level = prev_insts[-2].argval\n fromlist = prev_insts[-1].argval\n\n assert fromlist is None or type(fromlist) is tuple\n target_module_partname = inst.argval\n\n #FIXME: The exact same logic appears in _collect_import(),\n #which isn't particularly helpful. 
Instead, defer this logic\n #until later by:\n #\n #* Refactor the \"_deferred_imports\" list to contain 2-tuples\n # \"(_safe_import_hook_args, _safe_import_hook_kwargs)\" rather\n # than 3-tuples \"(have_star, _safe_import_hook_args,\n # _safe_import_hook_kwargs)\".\n #* Stop prepending these tuples by a \"have_star\" boolean both\n # here, in _collect_import(), and in _process_imports().\n #* Shift the logic below to _process_imports().\n #* Remove the same logic from _collect_import().\n have_star = False\n if fromlist is not None:\n fromlist = uniq(fromlist)\n if '*' in fromlist:\n fromlist.remove('*')\n have_star = True\n\n # Record this import as originating from this module for\n # subsequent handling by the _process_imports() method.\n module._deferred_imports.append((\n have_star,\n (target_module_partname, module, fromlist, level),\n {}\n ))\n\n elif inst.opname in ('STORE_NAME', 'STORE_GLOBAL'):\n # If this is the declaration of a global attribute (e.g.,\n # class, variable) in this module, store this declaration for\n # subsequent lookup. See method docstring for further details.\n #\n # Global attributes are usually irrelevant to import parsing, but\n # remain the only means of distinguishing erroneous non-ignorable\n # attempts to import non-existent submodules of a package from\n # successful ignorable attempts to import existing global\n # attributes of a package's \"__init__\" submodule (e.g., the \"bar\"\n # in \"from foo import bar\", which is either a non-ignorable\n # submodule of \"foo\" or an ignorable global attribute of\n # \"foo.__init__\").\n name = inst.argval\n module.add_global_attr(name)\n\n elif inst.opname in ('DELETE_NAME', 'DELETE_GLOBAL'):\n # If this is the undeclaration of a previously declared global\n # attribute (e.g., class, variable) in this module, remove that\n # declaration to prevent subsequent lookup. See method docstring\n # for further details.\n name = inst.argval\n module.remove_global_attr_if_found(name)\n\n prev_insts.append(inst)", "def import_modules_from_strings(imports, allow_failed_imports=False):\n if not imports:\n return\n single_import = False\n if isinstance(imports, str):\n single_import = True\n imports = [imports]\n if not isinstance(imports, list):\n raise TypeError(f\"custom_imports must be a list but got type {type(imports)}\")\n imported = []\n for imp in imports:\n if not isinstance(imp, str):\n raise TypeError(f\"{imp} is of type {type(imp)} and cannot be imported.\")\n try:\n imported_tmp = import_module(imp)\n except ImportError:\n if allow_failed_imports:\n warnings.warn(f\"{imp} failed to import and is ignored.\", UserWarning)\n imported_tmp = None\n else:\n raise ImportError\n imported.append(imported_tmp)\n if single_import:\n imported = imported[0]\n return imported" ]
[ "0.5921209", "0.57611024", "0.567505", "0.5611896", "0.5579761", "0.55159265", "0.54755175", "0.5425813", "0.5418027", "0.53423756", "0.53144497", "0.5309955", "0.52486587", "0.52472657", "0.5202084", "0.5195576", "0.518933", "0.517455", "0.51186496", "0.5110992", "0.5099339", "0.5097264", "0.50950736", "0.5085069", "0.507139", "0.5064147", "0.50009304", "0.49946344", "0.49881288", "0.4986781" ]
0.5941546
0
expression_type is the main entry point for the package. Call TypeParser.expression_type and pass in a parse tree to have the root type of the tree calculated via a depth-first search of the tree.
def expression_type(self, node): # End recursion & return this node's type if it is a leaf or nonexistent if not node: return None if not node.get_children(): return self._subexpression_type(self._node_to_expression(node)) else: # Iteratively consume expression to calculate the type of this node. # Recurse on complex children. child_types = self._child_types(node.get_children()) while len(child_types) > 1: next_expression = self._next_expression(child_types) next_expression_type = self._subexpression_type(next_expression) child_types.insert(0, next_expression_type) return child_types[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _subexpression_type(self, expression):\n\n # Return the first matched TypeRule's output type, or None if no match\n for type_rule in self._type_rules[len(expression)]:\n applied_type = type_rule.apply(expression)\n if applied_type is not None:\n return applied_type\n return None", "def _infer_type_of_expression(expr, context):\n\n import operators\n import vba_library\n\n #print \"LOOK FOR TYPE\"\n #print expr\n #print type(expr)\n\n # Function with a hard coded type?\n if (hasattr(expr, \"return_type\")):\n #print \"POSSIBLE TYPE (1) '\" + safe_str_convert(expr) + \"' == \" + safe_str_convert(expr.return_type())\n return expr.return_type()\n\n # Call of function?\n import expressions\n if (isinstance(expr, expressions.Function_Call)):\n\n # Call of builtin function?\n if (expr.name.lower() in vba_library.VBA_LIBRARY):\n builtin = vba_library.VBA_LIBRARY[expr.name.lower()]\n if (hasattr(builtin, \"return_type\")):\n #print \"POSSIBLE TYPE (2.1) '\" + safe_str_convert(expr) + \"' == \" + safe_str_convert(builtin.return_type())\n return builtin.return_type()\n\n # Call of locally defined function.\n r = _get_local_func_type(expr, context)\n #print \"POSSIBLE TYPE (2.2) '\" + safe_str_convert(expr) + \"' == \" + safe_str_convert(r)\n return r\n \n # Easy cases. These have to be integers.\n if isinstance(expr, (operators.And,\n operators.Division,\n operators.FloorDivision,\n operators.Mod,\n operators.MultiDiv,\n operators.Multiplication,\n operators.Neg, operators.Not,\n operators.Or,\n operators.Power,\n operators.Subtraction,\n operators.Xor)):\n #print \"POSSIBLE TYPE (3) '\" + safe_str_convert(expr) + \"' == \" + \"INTEGER\"\n return \"INTEGER\"\n\n # Must be a string.\n if (isinstance(expr, operators.Concatenation)):\n #print \"POSSIBLE TYPE (4) '\" + safe_str_convert(expr) + \"' == \" + \"STRING\"\n return \"STRING\"\n \n # Harder case. This could be an int or a str (or some other numeric type, but\n # we're not handling that).\n if isinstance(expr, (expressions.BoolExpr, expressions.BoolExprItem, operators.AddSub)):\n\n # If we are doing subtraction we need numeric types.\n if ((hasattr(expr, \"operators\")) and (\"-\" in expr.operators)):\n #print \"POSSIBLE TYPE (5) '\" + safe_str_convert(expr) + \"' == \" + \"INTEGER\"\n return \"INTEGER\"\n \n # We have only '+'. 
Try to figure out the type based on the parts of the expression.\n r_type = None\n for child in expr.get_children():\n child_type = _infer_type_of_expression(child, context)\n if (child_type is not None):\n r_type = child_type\n #print \"POSSIBLE TYPE (6) '\" + safe_str_convert(child) + \"' == \" + safe_str_convert(r_type)\n return r_type\n\n # Can't figure out the type.\n #print \"POSSIBLE TYPE (7) '\" + safe_str_convert(expr) + \"' == \" + \"UNKNOWN!!\"\n return None", "def explore_expr(expr, value, is_child):\n actual_type = value.type.strip_typedefs()\n print (\"The value of '%s' is of type '%s' \"\n \"which is a typedef of type '%s'\" %\n (expr, str(value.type), str(actual_type)))\n\n Explorer.explore_expr(expr, value.cast(actual_type), is_child)\n return False", "def parse_expression(expression: str) -> nodes.ExpNode:\r\n\r\n tokens = tokenize(expression)\r\n node = build_expression_tree(tokens)\r\n\r\n return node", "def explore_expr(expr, value, is_child):\n type_code = value.type.code\n if type_code in Explorer.type_code_to_explorer_map:\n explorer_class = Explorer.type_code_to_explorer_map[type_code]\n while explorer_class.explore_expr(expr, value, is_child):\n pass\n else:\n print (\"Explorer for type '%s' not yet available.\\n\" %\n str(value.type))", "def is_type_expression(self, expr: Expression, top_level: bool=True) -> bool:\n # Assignment of TypeVar(...) are passed through\n if (isinstance(expr, CallExpr) and\n isinstance(expr.callee, NameExpr) and\n expr.callee.name == 'TypeVar'):\n return True\n elif isinstance(expr, EllipsisExpr):\n return not top_level\n elif isinstance(expr, NameExpr):\n if expr.name in ('True', 'False'):\n return False\n elif expr.name == 'None':\n return not top_level\n else:\n return True\n elif isinstance(expr, IndexExpr) and isinstance(expr.base, NameExpr):\n if isinstance(expr.index, TupleExpr):\n indices = expr.index.items\n else:\n indices = [expr.index]\n if expr.base.name == 'Callable' and len(indices) == 2:\n args, ret = indices\n if isinstance(args, EllipsisExpr):\n indices = [ret]\n elif isinstance(args, ListExpr):\n indices = args.items + [ret]\n else:\n return False\n return all(self.is_type_expression(i, top_level=False) for i in indices)\n else:\n return False", "def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()", "def _next_expression(self, child_types):\n\n # Consume child_types from front to build expression\n expression = []\n while len(expression) < 3:\n # An expression can be longer than 3 tokens if some are unary-negated\n if self._has_unary_negation(child_types, expression):\n expression.append(self._subexpression_type(child_types[:2]))\n [child_types.pop(0) for _ in [0,1]]\n else:\n 
expression.append(child_types.pop(0))\n return expression", "def load(cls, expr: str, tree_type, parent=None):\n expr = ast.parse(expr, mode='eval')\n return cls.recursive_load(expr, tree_type, parent)", "def eval(self, expression: str) ->'RDLValue':\n # Create local message handler that suppresses the usual output\n # to stderr.\n # Instead raises ValueError on any error\n msg_printer = messages.MessageExceptionRaiser()\n msg_handler = messages.MessageHandler(msg_printer)\n\n input_stream = InputStream(expression)\n\n parsed_tree = sa_systemrdl.parse(\n input_stream,\n \"eval_expr_root\",\n messages.RdlSaErrorListener(msg_handler)\n )\n\n visitor = ExprVisitor(self)\n\n # override visitor to use local message handler\n visitor.msg = msg_handler\n\n result = visitor.visit(parsed_tree)\n result.predict_type()\n return result.get_value()", "def test_parameter_with_type_expression_body(self):\n test_classes = [\n setup_java_class(\"(int foo) -> { return foo + 2; };\"),\n setup_java_class(\"(String s) -> s.length();\"),\n setup_java_class(\"(int foo) -> foo + 1;\"),\n setup_java_class(\"(Thread th) -> { th.start(); };\"),\n setup_java_class(\"(String foo, String bar) -> \"\n \"foo + bar;\"),\n ]\n for test_class in test_classes:\n clazz = parse.parse(test_class)\n self.assert_contains_lambda_expression_in_m(clazz)", "def generate_type_hierarchy(ctx):\n ctx.run(\"./env/bin/python -m puresnmp.types > doc/typetree.rst\")", "def evaluateStructure(compiled_expression):", "def calculate_expression(expression, debug_output=False):\n if debug_output:\n print('{:<14}'.format('Calculating:'), '\"', expression, '\"', sep='')\n\n parser_tree = Calculator.parse_expression(expression)\n\n if debug_output:\n print('{:<14}'.format('Postfix:'), '\"', parser_tree.to_string(), '\"', sep='')\n\n if parser_tree.get_root() is not None:\n Calculator._simplify(parser_tree, parser_tree.get_root())\n\n if debug_output:\n print('{:<14}'.format('Result:'), '\"', parser_tree.to_string(), '\"', sep='')\n print()\n\n return parser_tree", "def result_type(self):\n\n anc = self.find_ancestor(ASTDeclarationNode) or self.find_ancestor(ASTAssignmentNode)\n if anc:\n return anc.type()\n return get_expression_type(self)", "def evaluate(hdf5_array, expression, expression_type, expression_level=0, hyperslice=None):\n # cherrypy.log.error(\"%sEvaluating %s expression: %s\" % (\n # \" \" * expression_level, expression_type, slycat.hyperchunks.tostring(expression)))\n\n if isinstance(expression, int):\n return expression\n elif isinstance(expression, float):\n return expression\n elif isinstance(expression, str):\n return expression\n elif isinstance(expression, slycat.hyperchunks.grammar.AttributeIndex):\n if hyperslice is None:\n return hdf5_array.get_data(expression.index)[...]\n else:\n return hdf5_array.get_data(expression.index)[hyperslice]\n elif isinstance(expression, slycat.hyperchunks.grammar.BinaryOperator):\n left = evaluate(hdf5_array, expression.operands[0], expression_type, expression_level + 1)\n for operand in expression.operands[1:]:\n right = evaluate(hdf5_array, operand, expression_type, expression_level + 1)\n # cherrypy.log.error(\"left::%s \\n right::%s\" % (left, right))\n if expression.operator == \"<\":\n left = left < right\n elif expression.operator == \">\":\n left = left > right\n elif expression.operator == \"<=\":\n left = left <= right\n elif expression.operator == \">=\":\n left = left >= right\n elif expression.operator == \"==\":\n if numpy.isnan(right):\n left = numpy.isnan(left)\n else:\n left = 
left == right\n elif expression.operator == \"!=\":\n left = left != right\n elif expression.operator == \"and\":\n left = numpy.logical_and(left, right)\n elif expression.operator == \"or\":\n left = numpy.logical_or(left, right)\n elif expression.operator == \"in\":\n left = numpy.in1d(left, right)\n elif expression.operator == \"not in\":\n left = numpy.in1d(left, right, invert=True)\n else:\n cherrypy.log.error(\"slycat.web.server.__init__.py evaluate\",\n \"Unknown operator: %s\" % expression.operator)\n raise ValueError(\"Unknown operator: %s\" % expression.operator)\n return left\n elif isinstance(expression, slycat.hyperchunks.grammar.FunctionCall):\n if expression.name == \"index\":\n if hyperslice is None:\n return numpy.indices(hdf5_array.shape)[expression.args[0]]\n else:\n return numpy.indices(hdf5_array.shape)[expression.args[0]][hyperslice]\n elif expression.name == \"rank\":\n values = evaluate(hdf5_array, expression.args[0], expression_type, expression_level + 1)\n order = numpy.argsort(values)\n if expression.args[1] == \"desc\":\n order = order[::-1]\n return order\n else:\n cherrypy.log.error(\"slycat.web.server.__init__.py evaluate\", \"Unknown function: %s\" % expression.name)\n raise ValueError(\"Unknown function: %s\" % expression.name)\n elif isinstance(expression, slycat.hyperchunks.grammar.List):\n return expression.values\n else:\n cherrypy.log.error(\"slycat.web.server.__init__.py evaluate\", \"Unknown expression: %s\" % expression)\n raise ValueError(\"Unknown expression: %s\" % expression)", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def expression(self) -> Expression:\n ...", "def _evaluate(expression, isNumpy=True, **kwargs):\n if isNumber(expression):\n if isNumpy:\n return expressionToNumber(expression)\n else:\n return expression\n # Evaluate\n expr = substitute(expression, **kwargs)\n # Symbol substitution can create a number\n if isNumber(expr):\n return expr\n val = expr.evalf()\n if hasSymbols(val):\n return val\n if isNumpy:\n if \"rows\" in dir(expression):\n result = np.array(val)\n else:\n try:\n result = float(val)\n except TypeError:\n result = complex(val)\n else:\n result = val\n return result", "def main():\n try:\n while True:\n try:\n text = input(' enter expression > ')\n except EOFError:\n break\n if not text:\n continue\n\n # instantiate interpreter\n interpreter = Interpreter(text)\n interpreter.interpret()\n\n # print the value of variable in scope after interpreting every syntax.\n print(\"\\n\".join(\"{} = {}\".format(key, value) for key, value in interpreter.VARIABLES.items()))\n except:\n print(\"error\")", "def get_type(self, expr: expressions.Expression) -> types.Type:\n if expr not in self._type_cache:\n inferred_type = expr.infer_type(self)\n inferred_type.validate_in(self)\n self._type_cache[expr] = inferred_type\n return self._type_cache[expr]", "def evaluate(expression, stream):\n assert expression.data == 'expression', expression\n assert len(expression.children) == 1\n\n expression = expression.children[0]\n\n if expression.data == 'identity':\n for node in stream:\n yield node\n\n elif expression.data == 'primitive':\n yield primitive(expression)\n\n elif expression.data == 'properties':\n for node in 
properties(expression, stream):\n yield node\n\n elif expression.data == 'indexer':\n for node in indexer(expression, stream):\n yield node\n\n elif expression.data == 'iterator':\n for node in iterate(expression, stream):\n yield node\n\n elif expression.data == 'concatenator':\n for node in concatenate(expression, stream):\n yield node\n\n else:\n assert False, 'bad expression {}'.format(expression)", "def evaluate(self, tree):\n\t\tpass", "def explore_type(name, datatype, is_child):\n type_code = datatype.code\n if type_code in Explorer.type_code_to_explorer_map:\n explorer_class = Explorer.type_code_to_explorer_map[type_code]\n while explorer_class.explore_type(name, datatype, is_child):\n pass\n else:\n print (\"Explorer for type '%s' not yet available.\\n\" %\n str(datatype))", "def infer_type(node):\n mod = tvm.IRModule.from_expr(node)\n mod = relay.transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(node, relay.Function) else entry.body", "def main():\n\n args = parser.parse_args()\n try:\n converter = RPN(args.modules)\n postfix_expression = converter.convert(args.expr)\n\n if args.show_postfix:\n print(postfix_expression)\n\n print(converter.calculate(postfix_expression))\n except Exception as exception:\n if str(exception) == 'brackets are not balanced':\n print('ERROR: ' + str(exception))\n exit(1)\n elif str(exception).startswith('No module named'):\n print('ERROR: ' + str(exception))\n exit(2)\n else:\n print('ERROR: ' + str(exception))\n exit(3)", "def explore_type(name, datatype, is_child):\n target_type = datatype.target()\n Explorer.explore_type(name, target_type, is_child)\n return False", "def evaluate(compiled_expression):", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])" ]
[ "0.61831594", "0.5693838", "0.56593543", "0.5624738", "0.5615666", "0.5555837", "0.5478738", "0.54660076", "0.5333142", "0.5242742", "0.5225837", "0.52114373", "0.52045923", "0.51956785", "0.51615644", "0.5066505", "0.49917197", "0.49774233", "0.49624798", "0.49380043", "0.49058953", "0.48923922", "0.48776928", "0.4856614", "0.485335", "0.4852529", "0.48514476", "0.48376614", "0.48085225", "0.4795927" ]
0.6376541
0
Calculate the type of a leaf-level expression: that is, an expression with no complex child nodes (nodes that cannot be described by a single string type).
def _subexpression_type(self, expression): # Return the first matched TypeRule's output type, or None if no match for type_rule in self._type_rules[len(expression)]: applied_type = type_rule.apply(expression) if applied_type is not None: return applied_type return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expression_type(self, node):\n\n # End recursion & return this node's type if it is a leaf or nonexistent\n if not node:\n return None\n if not node.get_children():\n return self._subexpression_type(self._node_to_expression(node))\n else:\n # Iteratively consume expression to calculate the type of this node.\n # Recurse on complex children.\n child_types = self._child_types(node.get_children())\n while len(child_types) > 1:\n next_expression = self._next_expression(child_types)\n next_expression_type = self._subexpression_type(next_expression)\n child_types.insert(0, next_expression_type)\n return child_types[0]", "def CheckLeafType(leaf):\n if leaf not in ['upper', 'lower', 'both']:\n raise ValueError()\n return leaf", "def result_type(self):\n\n anc = self.find_ancestor(ASTDeclarationNode) or self.find_ancestor(ASTAssignmentNode)\n if anc:\n return anc.type()\n return get_expression_type(self)", "def explore_expr(expr, value, is_child):\n actual_type = value.type.strip_typedefs()\n print (\"The value of '%s' is of type '%s' \"\n \"which is a typedef of type '%s'\" %\n (expr, str(value.type), str(actual_type)))\n\n Explorer.explore_expr(expr, value.cast(actual_type), is_child)\n return False", "def _infer_type_of_expression(expr, context):\n\n import operators\n import vba_library\n\n #print \"LOOK FOR TYPE\"\n #print expr\n #print type(expr)\n\n # Function with a hard coded type?\n if (hasattr(expr, \"return_type\")):\n #print \"POSSIBLE TYPE (1) '\" + safe_str_convert(expr) + \"' == \" + safe_str_convert(expr.return_type())\n return expr.return_type()\n\n # Call of function?\n import expressions\n if (isinstance(expr, expressions.Function_Call)):\n\n # Call of builtin function?\n if (expr.name.lower() in vba_library.VBA_LIBRARY):\n builtin = vba_library.VBA_LIBRARY[expr.name.lower()]\n if (hasattr(builtin, \"return_type\")):\n #print \"POSSIBLE TYPE (2.1) '\" + safe_str_convert(expr) + \"' == \" + safe_str_convert(builtin.return_type())\n return builtin.return_type()\n\n # Call of locally defined function.\n r = _get_local_func_type(expr, context)\n #print \"POSSIBLE TYPE (2.2) '\" + safe_str_convert(expr) + \"' == \" + safe_str_convert(r)\n return r\n \n # Easy cases. These have to be integers.\n if isinstance(expr, (operators.And,\n operators.Division,\n operators.FloorDivision,\n operators.Mod,\n operators.MultiDiv,\n operators.Multiplication,\n operators.Neg, operators.Not,\n operators.Or,\n operators.Power,\n operators.Subtraction,\n operators.Xor)):\n #print \"POSSIBLE TYPE (3) '\" + safe_str_convert(expr) + \"' == \" + \"INTEGER\"\n return \"INTEGER\"\n\n # Must be a string.\n if (isinstance(expr, operators.Concatenation)):\n #print \"POSSIBLE TYPE (4) '\" + safe_str_convert(expr) + \"' == \" + \"STRING\"\n return \"STRING\"\n \n # Harder case. This could be an int or a str (or some other numeric type, but\n # we're not handling that).\n if isinstance(expr, (expressions.BoolExpr, expressions.BoolExprItem, operators.AddSub)):\n\n # If we are doing subtraction we need numeric types.\n if ((hasattr(expr, \"operators\")) and (\"-\" in expr.operators)):\n #print \"POSSIBLE TYPE (5) '\" + safe_str_convert(expr) + \"' == \" + \"INTEGER\"\n return \"INTEGER\"\n \n # We have only '+'. 
Try to figure out the type based on the parts of the expression.\n r_type = None\n for child in expr.get_children():\n child_type = _infer_type_of_expression(child, context)\n if (child_type is not None):\n r_type = child_type\n #print \"POSSIBLE TYPE (6) '\" + safe_str_convert(child) + \"' == \" + safe_str_convert(r_type)\n return r_type\n\n # Can't figure out the type.\n #print \"POSSIBLE TYPE (7) '\" + safe_str_convert(expr) + \"' == \" + \"UNKNOWN!!\"\n return None", "def primitive(expression):\n expression = expression.children[0]\n if expression.data == 'null':\n return null\n elif expression.data == 'boolean':\n return expression.children[0] == 'true'\n elif expression.data == 'string':\n return expression.children[0][1:-1]\n elif expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n return float(expression.children[0])\n assert False, 'bad primitive {}'.format(expression)", "def _next_expression(self, child_types):\n\n # Consume child_types from front to build expression\n expression = []\n while len(expression) < 3:\n # An expression can be longer than 3 tokens if some are unary-negated\n if self._has_unary_negation(child_types, expression):\n expression.append(self._subexpression_type(child_types[:2]))\n [child_types.pop(0) for _ in [0,1]]\n else:\n expression.append(child_types.pop(0))\n return expression", "def load(cls, expr: str, tree_type, parent=None):\n expr = ast.parse(expr, mode='eval')\n return cls.recursive_load(expr, tree_type, parent)", "def is_type_expression(self, expr: Expression, top_level: bool=True) -> bool:\n # Assignment of TypeVar(...) are passed through\n if (isinstance(expr, CallExpr) and\n isinstance(expr.callee, NameExpr) and\n expr.callee.name == 'TypeVar'):\n return True\n elif isinstance(expr, EllipsisExpr):\n return not top_level\n elif isinstance(expr, NameExpr):\n if expr.name in ('True', 'False'):\n return False\n elif expr.name == 'None':\n return not top_level\n else:\n return True\n elif isinstance(expr, IndexExpr) and isinstance(expr.base, NameExpr):\n if isinstance(expr.index, TupleExpr):\n indices = expr.index.items\n else:\n indices = [expr.index]\n if expr.base.name == 'Callable' and len(indices) == 2:\n args, ret = indices\n if isinstance(args, EllipsisExpr):\n indices = [ret]\n elif isinstance(args, ListExpr):\n indices = args.items + [ret]\n else:\n return False\n return all(self.is_type_expression(i, top_level=False) for i in indices)\n else:\n return False", "def tree_string(self):\n if self.body is None:\n return 0\n if self.left is None and self.right is None:\n return self.body\n left_sum = self.left.tree_string()\n right_sum = self.right.tree_string()\n if self.body == add:\n return str(left_sum) + '+' + str(right_sum)\n elif self.body == sub:\n return str(left_sum) + '-' + str(right_sum)\n elif self.body == mul:\n return str(left_sum) + '*' + str(right_sum)\n else:\n return str(left_sum) + '/' + str(right_sum)", "def treetype(self):\n\t\treturn self._treetype", "def type_hierarchy(self):\n\t\treturn self._node.type_hierarchy", "def expression_depth(expr):\n \n return depth_helper(expr, 0, set())", "def compute_tree(self, x):\n if (self.body in operators):\n try:\n return self.body(self.left.compute_tree(x), self.right.compute_tree(x))\n except:\n return float(\"inf\")\n elif self.body == 'x': return x\n else: return self.body", "def generic_leaf(self, value, depth, available):\n return repr(value), False", "def leaf_NoneType(self, value, depth, available):\n return \"null\", 
False", "def literal(self, tree: lark.Tree) -> Result:\n if len(tree.children) != 1:\n raise CELSyntaxError(\n f\"{tree.data} {tree.children}: bad literal node\",\n line=tree.meta.line,\n column=tree.meta.column,\n\n )\n value_token = cast(lark.Token, tree.children[0])\n try:\n result: Result\n if value_token.type == \"FLOAT_LIT\":\n result = celpy.celtypes.DoubleType(value_token.value)\n elif value_token.type == \"INT_LIT\":\n result = celpy.celtypes.IntType(value_token.value)\n elif value_token.type == \"UINT_LIT\":\n if not value_token.value[-1].lower() == 'u':\n raise CELSyntaxError(\n f\"invalid unsigned int literal {value_token!r}\",\n line=tree.meta.line,\n column=tree.meta.column,\n )\n result = celpy.celtypes.UintType(value_token.value[:-1])\n elif value_token.type in (\"MLSTRING_LIT\", \"STRING_LIT\"):\n result = celstr(value_token)\n elif value_token.type == \"BYTES_LIT\":\n result = celbytes(value_token)\n elif value_token.type == \"BOOL_LIT\":\n result = (\n celpy.celtypes.BoolType(value_token.value.lower() == \"true\")\n )\n elif value_token.type == \"NULL_LIT\":\n result = None\n else:\n raise CELUnsupportedError(\n f\"{tree.data} {tree.children}: type not implemented\",\n line=value_token.line,\n column=value_token.column,\n )\n except ValueError as ex:\n result = CELEvalError(ex.args[0], ex.__class__, ex.args, tree=tree)\n\n return result", "def _lval(self, s):\n if s.lower() in self.parent.outputs:\n return self.parent.outputs[s.lower()]\n else: # expression\n return s.lower()", "def test_get_node_type_name(self):\n pass", "def evaluate(self, tree):\n\t\tpass", "def evaluate_expression_tree(root:Node) -> float:\n if root is None:\n return 0\n if root._left is None and root._right is None:\n return float(root._data)\n left_sum = evaluate_expression_tree(root._left)\n right_sum = evaluate_expression_tree(root._right)\n if root._data == '+':\n return left_sum + right_sum\n elif root._data == '-':\n return left_sum - right_sum\n elif root._data == '*':\n return left_sum * right_sum\n elif root._data == '/':\n return left_sum / right_sum\n elif root._data == '^':\n return left_sum ** right_sum\n else:\n raise ArithmeticError(root._data)", "def explore_type(name, datatype, is_child):\n actual_type = datatype.strip_typedefs()\n if is_child:\n print (\"The type of %s is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n else:\n print (\"The type '%s' is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n\n Explorer.explore_type(name, actual_type, is_child)\n return False", "def evaluateStructure(compiled_expression):", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)", "def check_tree_type(tree):\n return tree.type in ref", "def getType(self):\n return _libsbml.ASTNode_getType(self)", "def explore_type(name, datatype, is_child):\n target_type = datatype.target()\n Explorer.explore_type(name, target_type, is_child)\n return False", "def _child_types(self, children):\n\n # Recursively determine types on each child node\n child_types = []\n for child in children:\n child_types.append(self.expression_type(child))\n return child_types", "def get_subterms(expr):\n av_expr = []\n expr_types = []\n if isinstance(expr, Term):\n if expr.subterms:\n for s in expr.subterms:\n new_av, new_type = get_subterms(s)\n av_expr += new_av\n expr_types += new_type\n new_type = expr.type\n 
expr_types.append(new_type)\n av_expr.append(expr)\n else:\n av_expr.append(expr)\n expr_types.append(expr.type)\n elif type(expr) != str:\n if expr.term:\n new_av, new_type = get_subterms(expr.term)\n av_expr += new_av\n expr_types += new_type\n return av_expr, expr_types", "def get_type(cond, template, rep, subrep, sublevel, subtemplate):\n clean_rep = rep.replace(\"%\",\"\")\n clean_subrep = subrep.replace(\"%\",\"\")\n if subrep == '' and sublevel == '' and subtemplate == '':\n tetype = 1\n elif sublevel == 'location':\n tetype = 2\n elif sublevel == '' and subtemplate == '':\n tetype = 3\n elif sublevel == '':\n tetype = 4\n elif clean_rep == clean_subrep[:len(clean_rep)]:\n tetype = 5\n else:\n tetype = 6\n return tetype" ]
[ "0.7099167", "0.61412406", "0.609437", "0.58264744", "0.5807559", "0.54864943", "0.5333536", "0.52839464", "0.5262899", "0.5258796", "0.52191246", "0.52068317", "0.5178198", "0.51626647", "0.50838065", "0.50707287", "0.5054888", "0.5040522", "0.49886164", "0.49787894", "0.49498233", "0.49263915", "0.49244255", "0.49232", "0.49156383", "0.49146628", "0.49137765", "0.49054864", "0.48803723", "0.48760715" ]
0.6180652
1
Convert all nodes in an expression list to their associated types. This search handles the bulk of the parser's recursion, as it will start recursion on any node in the expression which is not a leaf node.
def _child_types(self, children): # Recursively determine types on each child node child_types = [] for child in children: child_types.append(self.expression_type(child)) return child_types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expression_type(self, node):\n\n # End recursion & return this node's type if it is a leaf or nonexistent\n if not node:\n return None\n if not node.get_children():\n return self._subexpression_type(self._node_to_expression(node))\n else:\n # Iteratively consume expression to calculate the type of this node.\n # Recurse on complex children.\n child_types = self._child_types(node.get_children())\n while len(child_types) > 1:\n next_expression = self._next_expression(child_types)\n next_expression_type = self._subexpression_type(next_expression)\n child_types.insert(0, next_expression_type)\n return child_types[0]", "def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)", "def _next_expression(self, child_types):\n\n # Consume child_types from front to build expression\n expression = []\n while len(expression) < 3:\n # An expression can be longer than 3 tokens if some are unary-negated\n if self._has_unary_negation(child_types, expression):\n expression.append(self._subexpression_type(child_types[:2]))\n [child_types.pop(0) for _ in [0,1]]\n else:\n expression.append(child_types.pop(0))\n return expression", "def allNodeTypes(*args, includeAbstract: bool=True, **kwargs)->List[AnyStr]:\n pass", "def explore_expr(expr, value, is_child):\n actual_type = value.type.strip_typedefs()\n print (\"The value of '%s' is of type '%s' \"\n \"which is a typedef of type '%s'\" %\n (expr, str(value.type), str(actual_type)))\n\n Explorer.explore_expr(expr, value.cast(actual_type), is_child)\n return False", "def flatten_list(node):\n flat_elems = []\n for elem in node:\n if elem == 'expr1024':\n for subelem in elem:\n for subsubelem in subelem:\n flat_elems.append(subsubelem)\n elif elem == 'expr32':\n for subelem in elem:\n flat_elems.append(subelem)\n else:\n flat_elems.append(elem)\n pass\n pass\n return flat_elems", "def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()", "def get_subterms(expr):\n av_expr = []\n expr_types = []\n if isinstance(expr, Term):\n if expr.subterms:\n for s in expr.subterms:\n new_av, new_type = get_subterms(s)\n av_expr += new_av\n expr_types += new_type\n new_type = expr.type\n expr_types.append(new_type)\n av_expr.append(expr)\n else:\n av_expr.append(expr)\n expr_types.append(expr.type)\n elif type(expr) != str:\n if expr.term:\n new_av, new_type = get_subterms(expr.term)\n av_expr += new_av\n expr_types += new_type\n return av_expr, expr_types", "def listNodeTypes(*args, exclude: AnyStr=\"\", **kwargs)->List[AnyStr]:\n 
pass", "def _convert_all(self, ast, label, idlnode_ctor):\n res = []\n found = self._find_all(ast, label)\n if not found:\n return res\n if not isinstance(found, list):\n raise RuntimeError(\"Expected list but %s found\" % type(found))\n for childAst in found:\n converted = idlnode_ctor(childAst)\n res.append(converted)\n return res", "def _parse_types(self, die):\n if die.offset in self._visited_die_offset:\n return\n else:\n self._visited_die_offset.append(die.offset)\n\n if die.tag == \"DW_TAG_base_type\":\n self._parse_base_type(die)\n\n elif die.tag == \"DW_TAG_const_type\":\n self._parse_const_type(die)\n\n elif die.tag == \"DW_TAG_volatile_type\":\n self._parse_volatile_type(die)\n\n elif die.tag == \"DW_TAG_typedef\":\n self._parse_typedef(die)\n\n elif die.tag == \"DW_TAG_pointer_type\":\n self._parse_pointer_type(die)\n\n elif die.tag == \"DW_TAG_array_type\":\n self._parse_array_type(die)\n\n elif die.tag == \"DW_TAG_enumeration_type\":\n self._parse_enums_type(die)\n\n # union and class are not implemented yet, use structure.\n elif die.tag == \"DW_TAG_structure_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_union_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_class_type\":\n self._parse_structure_type(die)\n\n elif die.tag == \"DW_TAG_subroutine_type\":\n self._parse_subroutine_type(die)\n\n else:\n ...\n\n if die.tag == \"DW_TAG_compile_unit\":\n return\n\n # if has children, iter them, except DW_TAG_compile_unit.\n for child_die in die.iter_children():\n self._parse_types(child_die)", "def list_node_types(self):\n return list(nodelist.all_nodes.keys())", "def _subexpression_type(self, expression):\n\n # Return the first matched TypeRule's output type, or None if no match\n for type_rule in self._type_rules[len(expression)]:\n applied_type = type_rule.apply(expression)\n if applied_type is not None:\n return applied_type\n return None", "def parse_itypes(item_types_txt):\n\n itype_nodes = {}\n parent_codes = {}\n\n for item_type in item_types_txt:\n code = item_type[\"Code\"]\n if not code:\n continue\n if code in itype_nodes:\n raise KeyError(f\"Duplicate itype code '{code}' found\")\n\n node = itype_nodes[code] = ITypeNode(code)\n parent_code_list = parent_codes[code] = []\n if item_type[\"Equiv1\"]:\n parent_code_list.append(item_type[\"Equiv1\"])\n if item_type[\"Equiv2\"]:\n parent_code_list.append(item_type[\"Equiv2\"])\n\n # Link parents and children\n for code, node in itype_nodes.items():\n for p_code in parent_codes[code]:\n parent_node = itype_nodes[p_code]\n parent_node.children.append(node)\n node.parents.append(parent_node)\n\n return itype_nodes", "def generic_visit(self, node):\r\n for field, value in iter_fields(node):\r\n if isinstance(value, list):\r\n for item in value:\r\n if isinstance(item, AST):\r\n self.visit(item)\r\n elif isinstance(value, AST):\r\n self.visit(value)", "def load(cls, expr: str, tree_type, parent=None):\n expr = ast.parse(expr, mode='eval')\n return cls.recursive_load(expr, tree_type, parent)", "def convert_type(self):\r\n tree = self.substitute.tree_\r\n classes = self.substitute.classes_\r\n node_list = []\r\n for i in range(tree.capacity):\r\n if tree.feature[i] == -2:\r\n node_list.append(Node(label=classes[np.argmax(tree.value[i, 0, :])]))\r\n else:\r\n node_list.append(Node(tree.feature[i], tree.threshold[i]))\r\n for i in range(tree.capacity):\r\n if tree.children_left[i] != -1:\r\n node_list[i].left = node_list[tree.children_left[i]]\r\n 
node_list[tree.children_left[i]].parents.append(node_list[i]) if node_list[i] not in node_list[tree.children_left[i]].parents else node_list[tree.children_left[i]].parents\r\n if tree.children_right[i] != -1:\r\n node_list[i].right = node_list[tree.children_right[i]]\r\n node_list[tree.children_right[i]].parents.append(node_list[i]) if node_list[i] not in node_list[tree.children_right[i]].parents else node_list[tree.children_right[i]].parents\r\n return node_list[0]", "def explore_expr(expr, value, is_child):\n type_code = value.type.code\n if type_code in Explorer.type_code_to_explorer_map:\n explorer_class = Explorer.type_code_to_explorer_map[type_code]\n while explorer_class.explore_expr(expr, value, is_child):\n pass\n else:\n print (\"Explorer for type '%s' not yet available.\\n\" %\n str(value.type))", "def generic_visit(self, node):\n for field in node._fields:\n try:\n value = getattr(node, field)\n except AttributeError:\n continue\n if isinstance(value, list):\n for item in value:\n if isinstance(item, ast.AST):\n self.visit(item)\n elif isinstance(value, ast.AST):\n self.visit(value)", "def generic_visit(self, node):\n for field, value in ast.iter_fields(node):\n if isinstance(value, list):\n for item in reversed(value):\n if isinstance(item, ast.AST):\n self.visit(item)\n elif isinstance(value, ast.AST):\n self.visit(value)", "def types(cls, root):\r\n return cls._TYPES_BY_ROOT[root]", "def node_type_filter(node_list, *filter_types):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.node_type_filter\")\n\n flg.info(\"Filtering Node List\")\n\n filtered_list = []\n for node in node_list:\n node_type = mc.nodeType(node)\n flg.debug(\"Node, {0}, is of type, {1}\".format(node, node_type))\n if node_type not in filter_types:\n flg.debug(\"Node kept\")\n filtered_list.append(node)\n else:\n flg.debug(\"Node filtered\")\n flg.info(\"Returning Filtered List\")\n return filtered_list", "def symbify(nodes):\n if not nodes: return None\n \n root, children = nodes\n return [(unique_id(root), node_type(root)), \n [symbify(child) for child in children]]", "def visit_expr(self, node, visited_children):\n output = []\n for child in visited_children[0]:\n output.append(child)\n return output", "def expression_to_tree(expression):\n\n # break string into tokens, parsing parenthesized sub-expressions\n\n tokens = []\n current_token = \"\"\n in_quotes = False\n paren_count = 0\n paren_start = None\n\n for i, c in enumerate(expression):\n\n if c in QUOTES and paren_count == 0:\n if in_quotes:\n tokens.append(current_token + c)\n current_token = \"\"\n in_quotes = False\n else:\n in_quotes = True\n if current_token:\n tokens.append(current_token)\n current_token = c\n elif c == \" \" and not in_quotes and paren_count == 0:\n if current_token:\n tokens.append(current_token)\n current_token = \"\"\n elif c == \"(\":\n paren_count += 1\n if paren_count == 1:\n paren_start = i\n elif c == \")\":\n paren_count -= 1\n if paren_count == -1:\n raise RuntimeError(\"Unbalanced right parenthesis in expression\")\n if paren_count == 0:\n tokens.append(expression_to_tree(expression[paren_start + 1:i]))\n current_token = \"\"\n elif paren_count == 0:\n current_token += c\n\n if current_token:\n tokens.append(current_token)\n\n if paren_count > 0:\n raise RuntimeError(\"Unbalanced left parenthesis in expression\")\n\n # convert string tokens to ASTNodes\n\n nodes = []\n\n for token in tokens:\n\n if isinstance(token, ASTNode):\n nodes.append(token)\n continue\n\n # noinspection PyTypeChecker\n match = 
QUOTE_PATTERN.match(token)\n\n if token.upper() in OPERATOR_TOKENS:\n nodes.append(OPERATOR_TOKENS[token.upper()]())\n elif match:\n nodes.append(TextNode(match.group(\"text\")))\n else:\n raise RuntimeError(\n \"Invalid token `{}` in expression string\".format(token))\n\n # parse NOT tokens\n\n infix_nodes = []\n\n nodes.reverse()\n\n while nodes:\n node = nodes.pop()\n if isinstance(node, NotNode):\n node.left_child = nodes.pop()\n infix_nodes.append(node)\n\n # set up nodes as a stack\n\n infix_nodes.reverse()\n\n # shunting-yard\n\n operator_stack = []\n operand_stack = []\n\n while infix_nodes:\n node = infix_nodes.pop()\n if isinstance(node, OperatorNode):\n if operator_stack and operator_stack[-1] >= node:\n operand_stack.append(operator_stack.pop())\n operator_stack.append(node)\n else:\n operand_stack.append(node)\n\n operand_stack.extend(operator_stack[::-1])\n\n operand_stack.reverse()\n\n output_stack = []\n\n while operand_stack:\n node = operand_stack.pop()\n if isinstance(node, OperatorNode):\n node.left_child = output_stack.pop()\n node.right_child = output_stack.pop()\n output_stack.append(node)\n\n return output_stack.pop()", "def _convert(val, acceptable_types):\n return parse_expression(val, acceptable_types, raise_type=ParseError)", "def build_expression_tree(token_list: Sequence[tokens.Token]) -> nodes.ExpNode:\r\n\r\n def is_unary_op(op) -> bool:\r\n return op in UNARYOP_TABLE\r\n\r\n def is_open_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenOpenBracket)\r\n\r\n def is_close_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenCloseBracket)\r\n\r\n def is_comma(token) -> bool:\r\n return isinstance(token, tokens.TokenSymbol) and token.symbol == Separators.SEP_COMMA\r\n\r\n def is_higher_or_equal_op_priority(op1, op2, table) -> bool:\r\n oi1 = table.get(op1)\r\n oi2 = table.get(op2)\r\n\r\n p1 = 0 if oi1 is None else oi1.priority\r\n p2 = 0 if oi2 is None else oi2.priority\r\n\r\n return p1 >= p2\r\n\r\n def read_exp_chain(index) -> Tuple[nodes.ExpNode, int]:\r\n token = token_list[index]\r\n if isinstance(token, tokens.TokenSymbol):\r\n if is_open_bracket(token):\r\n node, i = read_exp(index)\r\n elif is_unary_op(token.symbol):\r\n if UNARYOP_TABLE[token.symbol].affix == OperatorAffix.PREFIX:\r\n node, i = read_prefix_unary_exp(index)\r\n else:\r\n raise ParsingException(f\"unary operator '{token.symbol}' is not a prefix operator\", token.pos)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n node, i = read_exp(index)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if isinstance(next_token, tokens.TokenSymbol) and is_unary_op(next_token.symbol):\r\n if UNARYOP_TABLE[next_token.symbol].affix == OperatorAffix.POSTFIX:\r\n node, i = read_postfix_unary_exp(i, node)\r\n else:\r\n return (node, i)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if is_close_bracket(next_token):\r\n return (node, i)\r\n elif isinstance(next_token, tokens.TokenSymbol):\r\n if next_token.symbol == Separators.SEP_COMMA:\r\n return (node, i)\r\n elif next_token.symbol in BINOP_TABLE:\r\n return read_binary_exp(i, node)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{next_token.symbol}'\", next_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", next_token.pos)\r\n else:\r\n return (node, i)\r\n\r\n def read_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n if index >= len(token_list):\r\n raise 
ParsingException(\"unexpected token\", token_list[-1].pos)\r\n\r\n token = token_list[index]\r\n if is_open_bracket(token):\r\n return read_bracket_exp(index)\r\n elif isinstance(token, tokens.TokenNumber):\r\n return (nodes.NumberNode(token.num, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenName):\r\n if (index + 1) < len(token_list) and is_open_bracket(token_list[index + 1]):\r\n return read_func_call(index)\r\n else:\r\n return (nodes.NameConstantNode(token.name, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenSymbol):\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n raise ParsingException(\"unexpceted token\", token.pos)\r\n\r\n def read_bracket_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n node, i = read_exp_chain(index + 1)\r\n\r\n if i < len(token_list) and is_close_bracket(token_list[i]):\r\n return (node, i + 1)\r\n else:\r\n raise ParsingException(\"unmatch '('\", token_list[index].pos)\r\n\r\n def read_prefix_unary_exp(index) -> Tuple[nodes.UnaryOpNode, int]:\r\n node, i = read_exp(index + 1)\r\n token = token_list[index]\r\n return (nodes.UnaryOpNode(token.symbol, node, pos=token.pos), i)\r\n\r\n def read_postfix_unary_exp(index, child: nodes.ExpNode) -> Tuple[nodes.UnaryOpNode, int]:\r\n token = token_list[index]\r\n\r\n if isinstance(child, nodes.UnaryOpNode):\r\n if is_higher_or_equal_op_priority(token.symbol, child.op, UNARYOP_TABLE):\r\n node = nodes.UnaryOpNode(token.symbol, child.child, pos=token.pos)\r\n child.child = node\r\n node = child\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n\r\n return (node, index + 1)\r\n\r\n def read_binary_exp(index, left: nodes.ExpNode) -> Tuple[nodes.BinaryOpNode, int]:\r\n right, i = read_exp_chain(index + 1)\r\n\r\n token = token_list[index]\r\n if isinstance(right, nodes.BinaryOpNode) and not is_open_bracket(token_list[index + 1]):\r\n # check operator priority and rotate the expression tree when necessary.\r\n # when priority of two operators are equal, we also should rotate the tree\r\n # in case these operators don't follow the commutative law.\r\n if is_higher_or_equal_op_priority(token.symbol, right.op, BINOP_TABLE):\r\n node = nodes.BinaryOpNode(token.symbol, left, right.left, pos=token.pos)\r\n right.left = node\r\n node = right\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n\r\n return (node, i)\r\n\r\n def read_func_call(index) -> Tuple[nodes.FuncCallNode, int]:\r\n name_token = token_list[index]\r\n index += 2 # skip '('\r\n\r\n token_count = len(token_list)\r\n\r\n node = None\r\n i = index\r\n args = []\r\n\r\n while i < token_count and not is_close_bracket(token_list[i]):\r\n node, i = read_exp_chain(i)\r\n args.append(node)\r\n if i < token_count and is_comma(token_list[i]):\r\n i += 1\r\n else:\r\n break\r\n\r\n if i < token_count and is_close_bracket(token_list[i]):\r\n func_node = nodes.FuncCallNode(name_token.name, args, pos=name_token.pos)\r\n return (func_node, i + 1)\r\n else:\r\n raise ParsingException(\"unclose func call\", name_token.pos)\r\n\r\n\r\n node, i = read_exp_chain(0)\r\n\r\n if i < len(token_list):\r\n last_token = token_list[i]\r\n if is_close_bracket(last_token):\r\n raise ParsingException(\"unmatch ')'\", last_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", 
last_token.pos)\r\n else:\r\n return node", "def call_node_infer_type(node):\n infer_out = infer_type(node)\n out_type = infer_out._checked_type_\n if isinstance(out_type, TensorType):\n types = [out_type]\n elif isinstance(out_type, TupleType):\n types = list(out_type.fields)\n else:\n raise RuntimeError(f\"Unsupported output type {type(out_type)} in operator {node.op.name}\")\n\n return types", "def auto_convert(self):\n nodes_converted = []\n for node_type in self.conversion_spec_sheet:\n print('searching for: %s' % node_type)\n found_nodes = self.list_nodes(node_type)\n print('found: %s nodes' % len(found_nodes))\n for node in found_nodes:\n new_node = self.convert(node)\n nodes_converted.append([node, new_node])\n\n return nodes_converted", "def report(self, node_types: List[str] = None):\n for node_type, results in self.nodes.items():\n if node_types is not None and node_type in node_types:\n print(node_type)\n pprint(results)" ]
[ "0.6018786", "0.5674755", "0.56699795", "0.55886114", "0.5423717", "0.5258556", "0.52495664", "0.5247424", "0.5180887", "0.5121855", "0.51158464", "0.50840604", "0.50553924", "0.5054486", "0.5041273", "0.5030835", "0.5007947", "0.5004885", "0.4962244", "0.49611312", "0.4956788", "0.4915231", "0.49143824", "0.49042895", "0.48806015", "0.4880175", "0.48677826", "0.4863045", "0.48586735", "0.48576102" ]
0.5898406
1
Check whether the next part of the potential expression contains a unary negation.
def _has_unary_negation(self, child_types, expression): return len(expression) in [0,2] and child_types[0] == '-'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_negated(x) -> bool:\n return not (x & 1 == 0)", "def _negation_op(spec, expression):", "def no_operators(expression):\n OPERATORS = set('+-*/')\n for i in expression:\n if i in OPERATORS:\n return True\n raise NotValidExpression('Not a valid expression, no operators')", "def _is_unary_op(op):\n if op.type == TokenType.BitwiseNot:\n return True\n return False", "def is_unary(s):\n return s == '~'", "def negative(word: str) -> bool:\n\n negatives = ['no', 'negative', 'nah']\n return negatives.__contains__(word)", "def is_nonnegative(self, a):\n return a >= 0", "def is_neg_ctwo(x):\n return True if x[0] == '1' else False", "def negated(input_words, include_nt=True):\n input_words = [str(w).lower() for w in input_words]\n neg_words = []\n neg_words.extend(NEGATE)\n for word in neg_words:\n if word in input_words:\n return True\n if include_nt:\n for word in input_words:\n if \"n't\" in word:\n return True\n if \"least\" in input_words:\n i = input_words.index(\"least\")\n if i > 0 and input_words[i - 1] != \"at\":\n return True\n return False", "def lnot_expr(self, matches):\n subexpr_val = self.evaluate(matches.children[0])\n return self.bool_to_int(subexpr_val != 0)", "def negation_check(self,sentence):", "def is_negative(self):\n return (self._num < 0)", "def test_unary_op_support():\n check_peval_expression(\"+(2)\", {}, \"2\", fully_evaluated=True, expected_value=2)\n check_peval_expression(\"-(-3)\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression_bool(\"not 0\", {}, True)\n check_peval_expression(\"~(-4)\", {}, \"3\", fully_evaluated=True, expected_value=3)", "def is_nonpositive(self, a):\n return a <= 0", "def istrue(self):\n return has_pos_and_neg(self.literals)", "def isNegative(phrase):\n return bool(re.search(r'\\b(nie|stop|koniec|odmowa|odmawiam)\\b', phrase, re.IGNORECASE))", "def negations(self) -> str:", "def notpexpr(*disallowed_heads):\n return some(lambda x: not (\n isinstance(x, HyExpression) and\n x and\n isinstance(x[0], HySymbol) and\n x[0] in disallowed_heads))", "def no(seq, pred=None):\n for elem in ifilter(pred, seq):\n return False\n return True", "def is_negation(bigram):\n\n\tnegation_words = ['not','never']\n\n\tbigram_part_1 = bigram[0]\n\tbigram_part_2 = bigram[1]\n\n\tif bigram_part_1.split('/')[0] in negation_words:\n\t\treturn True\n\n\treturn False", "def isExcluded(self, word):\n #print word\n return ((self.isExcludedWord(word) != False) \n or (self.isMeasure(word) != False) \n or (self.isAllDigits(word) != False) \n or (self.isShortWord(word) != False))", "def test_textNotOperator(self):\n xp = XPathQuery(\"/foo[not(@nosuchattrib)]\")\n self.assertEqual(xp.matches(self.e), True)", "def containseverything(self) -> bool:\n return self.isinfinite() and self.complement().isempty()", "def check_for_grammatical_negation(t_node):\n \n # Look for T det \"no\" pattern\n det_edges = [edge for edge in t_node.outedges if edge.dep == 'det']\n for det_edge in det_edges:\n if det_edge.to_node.lemma == \"no\":\n return True\n \n # Look for T neg pattern\n neg_edges = [edge for edge in t_node.outedges if edge.dep == 'neg']\n if neg_edges:\n return True\n \n # If all patterns above fail, then we assume there is no negation\n return False", "def is_neg_unate(self, vs=None):\n vs = self._expect_vars(vs)\n basis = self.support - set(vs)\n maxcov = [PC_ONE] * (1 << len(basis))\n # Test whether table entries are monotonically decreasing\n for cf in self.iter_cofactors(vs):\n for i, item in enumerate(cf.pcdata):\n if maxcov[i] == PC_ZERO and 
item == PC_ONE:\n return False\n maxcov[i] = item\n return True", "def is_negative(self, a):\n return a < 0", "def is_Negative(self):\n return self.signature() < 0", "def negate(self):\n self.formula = '!(' + self.formula + ')'", "def _is_minus1(expr):\r\n try:\r\n v = get_scalar_constant_value(expr)\r\n return numpy.allclose(v, -1)\r\n except NotScalarConstantError:\r\n return False", "def __neg__(self):\n return self.negated()" ]
[ "0.7028451", "0.6717068", "0.6701668", "0.6684899", "0.6622539", "0.64245415", "0.63324", "0.62940043", "0.62915003", "0.6271314", "0.62570894", "0.624976", "0.62437516", "0.62413335", "0.62379885", "0.62241846", "0.6194157", "0.6190811", "0.6138865", "0.6043831", "0.6005857", "0.5983983", "0.5975686", "0.5966234", "0.59569913", "0.5931653", "0.5876231", "0.5870992", "0.5861365", "0.585007" ]
0.76073074
0
Retries creation of a Kafka consumer connection until it succeeds or times out.
def try_creating_kafka_consumer(broker, broker_port, topic, consumer_group): retries = 8 for i in range(retries): try: return KafkaConsumer(topic, group_id=consumer_group, bootstrap_servers=[f'{broker}:{broker_port}']) except errors.NoBrokersAvailable: logging.error("attempt number: " + str(i + 1) + " broker: " + broker + ":" + str(broker_port)) sleep(10) raise errors.NoBrokersAvailable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_kafka_connection(delay=5):\n while True:\n try:\n kafka = KafkaProducer(bootstrap_servers=KAFKA_BROKERS)\n LOGGER.info('Connection to kafka cluster established')\n kafka.close()\n break\n except:\n LOGGER.error('Can not connect to kafka cluster')\n time.sleep(delay)", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not 
making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_start_sameconnector_twice_with_noreconnecting_on_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n startRet = yield self.start(localConfig.id)\n\n self.assertEqual(True, startRet)\n\n yield self.stopall()\n\n # Give a grace time for stopping\n yield waitFor(0.2)", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def retry_connect(redis_cfg, tries=300, base_delay=4.):\n for i in range(tries):\n try:\n r = redis.StrictRedis(**redis_cfg)\n r.ping()\n return r\n except redis.ConnectionError as e:\n if i == tries - 1:\n raise\n else:\n delay = base_delay * (1 + (os.getpid() % 10) / 9)\n print(f'WARNING: could not connect to {redis_cfg}. Retrying after {delay} sec ({i+2}/{tries}). Error {e}')\n time.sleep(delay)", "def test_startconnector_with_noretry_on_con_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # It takes a moment to stop the service after a connection failure\n while True:\n ssRet = yield self.service_status(localConfig.id)\n if ssRet != 1:\n break;\n else:\n time.sleep(1)\n\n self.assertEqual(0, ssRet)\n\n yield self.stop(localConfig.id)", "def test_producer_stop_waiting_to_retry(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f)]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n # Advance the clock, some, but not enough to retry\n clock.advance(producer._retry_interval / 2)\n # Stop the producer before the retry\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def connect(self, num_retry_attempts=1):\n pass", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected 
offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... 
checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def _mp_consume(client, group, topic, queue, size, events, **consumer_options):\n\n # Initial interval for retries in seconds.\n interval = 1\n while not events.exit.is_set():\n try:\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None,\n **consumer_options)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n events.start.wait()\n\n # If we are asked to quit, do so\n if events.exit.is_set():\n break\n\n # Consume messages and add them to the queue. If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n while True:\n try:\n queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)\n break\n except queue.Full:\n if events.exit.is_set():\n break\n\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. 
Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n events.pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()\n\n except KafkaError as e:\n # Retry with exponential backoff\n log.error(\n \"Problem communicating with Kafka (%s), retrying in %d seconds...\" % (e, interval))\n time.sleep(interval)\n interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS", "async def consumer_loop(\n uri: str,\n topic: str,\n ssl_context: SSLContext,\n writers: List[Callable[[ConsumerPayload], Awaitable[int]]],\n) -> int:\n log.info(\"consumer: starting\")\n if len(writers) < 1:\n raise ValueError(\"there must be at least one writer passed to consumer_loop.\")\n queue: asyncio.Queue[ConsumerPayload] = asyncio.Queue()\n async with AIOKafkaConsumer(\n topic,\n bootstrap_servers=uri,\n security_protocol=\"SSL\",\n ssl_context=ssl_context,\n group_id=DEFAULT_GROUP_ID,\n ) as consumer:\n await asyncio.gather(\n decoder(queue, consumer), writer_wrapper(queue=queue, writers=writers)\n )\n log.info(\"consumer: exiting\")\n return 0", "def testGoodRetry(self):\n self.p = start_short_timeout_app_process()\n gateway = JavaGateway()\n connections = gateway._gateway_client.deque\n try:\n # Call #1\n gateway.jvm.System.currentTimeMillis()\n str_connection = str(connections[0])\n\n # Call #2 after, should not create new connections if the system is\n # not too slow :-)\n gateway.jvm.System.currentTimeMillis()\n self.assertEqual(1, len(connections))\n str_connection2 = str(connections[0])\n self.assertEqual(str_connection, str_connection2)\n\n sleep(0.5)\n gateway.jvm.System.currentTimeMillis()\n self.assertEqual(1, len(connections))\n str_connection3 = str(connections[0])\n # A new connection was automatically created.\n self.assertNotEqual(str_connection, str_connection3)\n except Py4JError:\n self.fail(\"Should retry automatically by default.\")\n finally:\n gateway.shutdown()\n self.p.join()", "def test_connection(max_attempts=5, interval_secs=1.0):\n num_attempts = 1\n while True:\n try:\n with engine.connect():\n return\n\n except OperationalError:\n if num_attempts > max_attempts:\n msg = 'Connecting to database failed after {} attempts'.format(\n max_attempts)\n print(msg)\n raise\n # Retry if connection refused\n num_attempts += 1\n time.sleep(interval_secs)", "def _connect(self):\n for attempt in range(1, self.num_attempts + 1):\n try:\n conn = self.rabbitmq_context.get_connection(self.timeout)\n chan = conn.channel()\n return (conn, chan)\n except AMQPError as ex:\n if attempt >= self.num_attempts:\n if self.ignore_connection_failure:\n raise ex\n else:\n self.fail(\n \"Could not access RabbitMQ host {0} because {1}\"\n .format(self.rabbitmq_context.host, repr(ex)))\n else:\n time.sleep(self.seconds_between_attempts)", "def get_kafka_client(num_retries=20, retry_sleep=1):\n kafka_hosts = runtime.get_active_config('kafka_hosts').values()\n kafka_port = runtime.get_active_config('kafka_port')\n assert len(kafka_hosts) > 0, 'Missing required configuration: kafka_hosts'\n connect_string = ','.join(map(lambda h: h + ':{0},'.format(kafka_port), kafka_hosts)).rstrip(',')\n # wait for at least one broker to come up\n if not wait_for_server(kafka_hosts[0], kafka_port, 30):\n raise Exception('Unable to connect to 
Kafka broker: {0}:{1}'.format(kafka_hosts[0], kafka_port))\n return KafkaClient(connect_string)", "def reconnect(self):\n self.test_cmd()\n if not self.check_network: \n self.reset()\n attempt=0\n while not self.check_network and attempt<self.retries:\n self.full_reset()\n attempt+=1", "def connect_never_retry():\n try:\n messaging_service = MessagingService.builder().from_properties(boot.broker_properties()) \\\n .with_reconnection_retry_strategy(RetryStrategy.never_retry()).build()\n future = messaging_service.connect_async()\n\n return future.result()\n\n except PubSubPlusClientError as exception:\n raise exception\n\n finally:\n messaging_service.disconnect_async()", "def test_create_consumer(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n except Exception as e:\n self.fail(f\"test_create_consumer() failed with exception: {e}\")\n\n try:\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer.start() in test_create_consumer() failed with exception: {e}\")\n\n # Sleep for a couple seconds to allow the thread to come up.\n time.sleep(2)\n self.assertEqual(3, threading.active_count()) # Main thread, consumer thread, consumer-group hear-beat daemon.\n\n test_consumer.stop()\n test_consumer.join()\n self.assertEqual(2, threading.active_count())", "async def create_unconsumed_topics():\n # ################################################ #\n # TODO: remove these once there is someone consuming the topics\n unconsumed_topics = ['dummy']\n\n logger.warning(\n f'Creating topics on the publisher: {unconsumed_topics} due to lack of consumers. '\n 'Remove them once there are consumers'\n )\n for topic in unconsumed_topics:\n await kafka.topic(topic).maybe_declare()\n\n # ################################################ #", "def check_kafka_connection(host, port):\n conn = BrokerConnection(host, int(port), socket.AF_UNSPEC)\n connected = conn.connect_blocking(timeout=1)\n if connected:\n conn.close()\n return connected", "async def start(self) -> None:\n while self.producer is None:\n try:\n self.producer = self._producer_factory(\n bootstrap_servers=self.bootstrap_servers,\n ssl_cafile=self.ssl_cafile,\n ssl_certfile=self.ssl_certfile,\n ssl_keyfile=self.ssl_keyfile,\n security_protocol='SSL',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n )\n except kafka.errors.NoBrokersAvailable:\n await trio.sleep(self.connect_interval_secs)\n else:\n logger.info('kafka-ready: %s', self.producer)\n async with self.has_producer:\n self.has_producer.notify_all()", "def _recover_network_failure(self):\n if self.auto_reconnect and not self._is_closing:\n connected = False\n while not connected:\n log_msg = \"* ATTEMPTING RECONNECT\"\n if self._retry_new_version:\n log_msg = \"* RETRYING DIFFERENT DDP VERSION\"\n self.ddpsocket._debug_log(log_msg)\n time.sleep(self.auto_reconnect_timeout)\n self._init_socket()\n try:\n self.connect()\n connected = True\n if self._retry_new_version:\n self._retry_new_version = False\n else:\n self._is_reconnecting = True\n except (socket.error, WebSocketException):\n pass", "def _setup_consumer(self):\n # <WTF> https://github.com/dpkp/kafka-python/issues/601\n self.available_topics = self.client.topics()\n # </WTF>\n\n # might as well use it\n assert self.topic in self.available_topics\n\n if (self.start_params is None) != (self.end_params is None):\n raise ValueError(\"Both start and end params must be set or both must be None\")\n\n if self.start_params is None:\n # setup 
partitions to read through\n # TODO not checked with multiple partitions since inheriting from foxglove\n # An offset is assigned to make repeatability (via a locking file) possible later on.\n # and it's easier to terminate the fetch loop this way.\n p_id = self.client.partitions_for_topic(self.topic)\n topic_partitions = [TopicPartition(topic=self.topic, partition=p) for p in list(p_id)]\n starts = self.client.beginning_offsets(topic_partitions)\n ends = self.client.end_offsets(topic_partitions)\n\n self.start_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset, timestamp=None)\n for tp, offset in starts.items()\n }\n self.end_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset - 1, timestamp=None)\n for tp, offset in ends.items()\n }\n\n else:\n # TODO - this code was inherited from Foxglove and hasn't be checked through\n # setup start and end partitions and offsets\n # self.client.seek_to_beginning()\n # datetime is only start/end implemented\n assert isinstance(self.start_params, datetime) and isinstance(self.end_params, datetime)\n start = int(self.start_params.timestamp() * 1000)\n end = int(self.end_params.timestamp() * 1000)\n\n partitions = self.client.partitions_for_topic(self.topic)\n tx = {TopicPartition(topic=self.topic, partition=p): start for p in list(partitions)}\n self.start_p_offsets = self.client.offsets_for_times(tx)\n\n # if you give a timestamp after the last record it returns None\n for tp, offset_details in self.start_p_offsets.items():\n if offset_details is None:\n raise ValueError(\"Start date outside of available messages\")\n\n tx = {TopicPartition(topic=self.topic, partition=p): end for p in list(partitions)}\n self.end_p_offsets = self.client.offsets_for_times(tx)\n\n # as above - out of range, for end offset give something useful\n for tp, offset_details in self.end_p_offsets.items():\n if offset_details is None:\n # go to last message. I'm not 100% sure this is correct\n end_offsets = self.client.end_offsets([tp])\n offset = end_offsets[tp] - 1\n self.end_p_offsets[tp] = OffsetAndTimestamp(offset=offset, timestamp=None)", "def setup_amq_kafka_connect(self):\n try:\n kafka_connect = templating.load_yaml(os.path.join(self.dir, self.amq_kafka_connect_yaml))\n self.kafka_connect = OCS(**kafka_connect)\n self.kafka_connect.create()\n except(CommandFailed, CalledProcessError) as cf:\n log.error('Failed during setup of AMQ KafkaConnect')\n raise cf\n\n if self.is_amq_pod_running(pod_pattern=\"my-connect-cluster-connect\"):\n return self.kafka_connect\n else:\n raise ResourceWrongStatusException(\"my-connect-cluster-connect pod is not getting to running state\")", "async def _auto_reconnect(self):\n while True:\n await asyncio.sleep(10)\n try:\n await self.connect()\n return\n except CannotConnect:\n pass", "def get_hosts_fanout_retry(self, target, listener_type):" ]
[ "0.6611963", "0.6494159", "0.63523865", "0.6156412", "0.61105436", "0.61080486", "0.59963095", "0.5959119", "0.59372276", "0.5926103", "0.58610475", "0.5830046", "0.5807465", "0.5799397", "0.57653743", "0.57564795", "0.56917095", "0.56620485", "0.5633267", "0.5633075", "0.5621555", "0.56074697", "0.5557397", "0.5533658", "0.55192107", "0.5499815", "0.54921967", "0.5484185", "0.5481266", "0.5463644" ]
0.7616249
0
Calculates the R^2 value for the GWR model.
def r2_GWR(GWRMod): tss = np.sum((GWRMod.y - GWRMod.y_mean)**2) r2 = 1.0 - GWRMod.res2/tss return r2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_R2(self):\n\n d1 = self.T - self.Y\n d2 = self.T - self.T.mean()\n\n self.r2 = 1 - d1.dot(d1) / d2.dot(d2)\n self.r2 = format(self.r2, '.3f')\n\n print \"\"\n print \"R2:\", self.r2", "def _calc_r2(self):\n sse = np.sum((self.data.y - self.predict(self.data.x))**2)\n sst = np.sum((self.data.y - self.data.y.mean())**2)\n return (1. - sse/sst)", "def calc_R2(Fasit, Model):\n # Calculating\n R2 = 1 - sum(np.power(Fasit - Model, 2)) / sum(np.power(Fasit - np.mean(Fasit), 2))\n return R2", "def r2(self) -> float:\n zx = (self.true - np.mean(self.true)) / np.std(self.true, ddof=1)\n zy = (self.predicted - np.mean(self.predicted)) / np.std(self.predicted, ddof=1)\n r = np.sum(zx * zy) / (len(self.true) - 1)\n return float(r ** 2)", "def get_r2_score(self):\n return self.r2_score", "def R2_ScoreFunction(y_data, y_model):\n\tcounter = np.sum((y_data-y_model)**2)\n\tdenominator = np.sum((y_data-np.mean(y_data))**2)\n\tR_2 = 1 - (counter/denominator)\n\n\treturn R_2", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def ar2_GWR(GWRMod): \r\n tss = np.sum((GWRMod.y - GWRMod.y_mean)**2)\r\n n = GWRMod.nObs # (scalar) number of observations\r\n if GWRMod.tr_S >= GWRMod.tr_STS:\t\t\r\n\tdof_res = GWRMod.nObs-2.0*GWRMod.tr_S+GWRMod.tr_STS\r\n else:\r\n\tdof_res = GWRMod.nObs-GWRMod.tr_S\r\n ar2_result = 1.0 - GWRMod.res2 / tss * ( n - 1.0) / (dof_res - 1.0)\r\n \r\n return ar2_result", "def r2_score(self):\n print('R^2 (coefficient of determination) regression score function: ' +\n str(r2_score(self.model.dataset.get_y_test(), self.model.get_predicted())))", "def r2(t, y):\n\treturn r2_score(t, y)", "def get_R(self):\n return self.R_min * tf.exp(self.R_ * self.log_R_range)", "def radial2(self) -> float:\n return self.distortion_coefficients[0]", "def r2_score(self, weights=None):\n\n if len(self.predicted) < 2:\n msg = \"R^2 score is not well-defined with less than two samples.\"\n warnings.warn(msg)\n return None\n\n if weights is None:\n weight = 1.\n else:\n weight = weights[:, np.newaxis]\n\n numerator = (weight * (self.true - self.predicted) ** 2).sum(axis=0,\n dtype=np.float64)\n denominator = (weight * (self.true - np.average(\n self.true, axis=0, weights=weights)) ** 2).sum(axis=0, dtype=np.float64)\n\n if numerator == 0.0:\n return None\n output_scores = _foo(denominator, numerator)\n\n return float(np.average(output_scores, weights=weights))", "def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def pseudo_r2(self):\n y_reg = self.time_series(len(self.data))\n SSres = ((self.data - y_reg)**2).sum()\n SStot = ((self.data - self.data.mean())**2).sum()\n return 1 - SSres/SStot", "def r(self) -> float:\n return self._ohms.real", "def r2_OLS(OLSMod): \r\n y = OLSMod.y # (array) vector of dep observations (n x 1)\r\n mean_y = OLSMod.y_mean # (scalar) mean of dep observations\r\n utu = OLSMod.res2 # (scalar) residual sum of squares\r\n ss_tot = ((y - mean_y) ** 2).sum(0)\r\n r2 = 1-utu/ss_tot\r\n r2_result = r2[0]\r\n return r2_result", "def r2(y: np.ndarray, y_hat: np.ndarray) -> float:\n n = y.shape[0]\n sse = n * mse(y, y_hat)\n y_mean = np.mean(y)\n sst = np.sum((y - y_mean)**2)\n r2_value = 1 - sse / sst\n return r2_value", "def _R(self):\n return np.exp(self._log_R)", "def evaluate_r2(y_true, y_pred):\n\n r2_eval = r2_score(y_true, y_pred)\n\n return r2_eval", "def _rsq(self):\n return 
self._ss_reg / self._ss_tot", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def sigma2_RG(self):\n sigma = np.sqrt(self.cosmo.gs_spectral_moment(l=2,RG=self.RG))\n return sigma", "def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale\n if self.regularizationLambda > 0:\n r2 -= self.regularizationLambda*np.dot(self.x,self.x)\n return r2", "def R_squared(self):\n return 1 - ((self.y - self.y_hat(self.x))**2).sum() / ((self.y - self.y.mean())**2).sum()", "def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss", "def adjusted_R_squared(R2, p, n):\n return 1 - (1 - R2)*(n-1)/(n-p-1)", "def test_regress_R2(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_R2(x, y)\r\n self.assertFloatEqual(result, 0.99171419347896)", "def probaR2CondR1(self, r1, r2):\n\n if r1 == 0.0:\n if r2 == 0.0:\n return self.__alpha0 / self.__D0\n elif r2 == 1.0:\n return self.__beta / self.__D0\n else:\n return self.__eta * (1. - r2) / self.__D0\n\n elif r1 == 1.0:\n if r2 == 0.0:\n return self.__beta / self.__D1\n elif r2 == 1.0:\n return self.__alpha1 / self.__D1\n else:\n return self.__eta * r2 / self.__D1\n\n else:\n D = 1.5 + r1 - r1 * r1 \n if r2 == 0.0:\n return (1. - r1) / D\n elif r2 == 1.0:\n return r1 / D\n else:\n return (1. - np.abs(r1 - r2)) / D" ]
[ "0.7919789", "0.76424164", "0.7621829", "0.7487459", "0.71899205", "0.7047658", "0.69599354", "0.6907422", "0.6887155", "0.68140036", "0.67396015", "0.67070526", "0.6683979", "0.66635364", "0.65382195", "0.65051377", "0.64974743", "0.6475882", "0.64547855", "0.6451764", "0.63645494", "0.631485", "0.6273899", "0.6233264", "0.6203796", "0.6193825", "0.61770755", "0.61142814", "0.6096167", "0.6077285" ]
0.82065165
0
Populate a list with Edge objects, mapped from db.
def populate_edges(self, edges_list): edges = [] for edge in edges_list: source, target, weight = edge[4], edge[5], edge[6] freq, line, geom = edge[7], edge[1], edge[2] edges.append(Edge(source, target, weight, freq, line, geom)) self.edges = edges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_edge_ist(self) -> EdgeList:\r\n return EdgeList(self)", "def create_edges(self, edge_json):\n self.edges = []\n for edge in edge_json:\n from_node = self.find_node(edge[\"from\"])\n to_node = self.find_node(edge[\"to\"])\n self.edges.append(Edge(from_node, to_node))\n self.clean_edges()\n self.all_edges = list(self.edges)", "def iter_edges(self) -> Iterable[EdgeTuple]:\n # FIXME: handle case where initializing with ddict data from query.\n # If implemented here (adding **d to the edge factory arguments), it\n # will always attempt to update the database on a per-read basis!\n return (\n (u, v, dict(self.edge_attr_dict_factory(self.network, u, v)))\n for u, v, d in self.network.edges.iter_edges()\n )", "def addEdgeList(self, edges):\n for e in edges:\n self.addEdge(e[0], e[1], e[2] if len(e) > 2 else None)", "def create_edgelist(self):\n self.edgelist = []\n \n for i in range(len(self.Adjmatrix)):\n for j in range(len(self.Adjmatrix)):\n if(self.Adjmatrix[i, j] == 1):\n middlex = 0.5*(self.x[i] + self.x[j])\n middley = 0.5*(self.y[i] + self.y[j])\n self.edgelist.append({\"start node\": i, \"end node\": j, \"link length\": self.Dismatrix[i, j], \"edgediameter\": self.edgediameter, \"middlex\": middlex, \"middley\": middley})", "def fetch_from_sqlite(self):\n conn = get_sqlite()\n c = conn.cursor()\n c.execute('SELECT * FROM vertices ORDER BY id')\n vertices =c.fetchall()\n c.execute('SELECT * FROM edges')\n edges =c.fetchall()\n conn.commit()\n\n self.graph.add_vertices(len(vertices))\n for one in vertices:\n id =int(one[0])\n self.graph.vs[id][\"name\"] = one[1]\n self.graph.vs[id][\"parent\"] = one[2]\n self.graph.vs[id][\"size\"] = one[3]\n self.graph.vs[id][\"last_modified\"] = one[4]\n self.graph.vs[id][\"last_accessed\"] = one[5]\n\n for one in edges:\n self.graph.add_edges([(one[0],one[1])])", "def __init__(self):\n self._list: List[Edge] = list()", "def __init__(self, edgelist):\n self.edge = edgelist\n if edgelist:\n self.update_node2edge()", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def populate_vertices(self, vertices_list):\n vertices = []\n for vertex in vertices_list:\n vertex_id = vertex[0]\n vertices.append(Vertex(vertex_id))\n self.vertices = vertices", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... 
every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def get_edge_list(self):\n return [(edge.value, edge.node_from.value, edge.node_to.value) for edge in self.edges]", "def edge_list(self) -> List[Edge]:\r\n return [edge for edge in sorted(self._edges.values(), key=attrgetter(\"key\"))]", "def edges_list(self):\n return self._edges_list", "def _load_neighbors_from_database(self) -> None:\r\n self._are_neighbors_loaded = True\r\n\r\n graph: Graph = self._graph\r\n neighbors: List[DBNode] = graph.database.Node.find_by_name(self.name).neighbors\r\n nodes: NodeList = graph.nodes\r\n\r\n for db_node in neighbors:\r\n graph.add_node(db_node.name, db_node.external_id)\r\n neighbor: Node = nodes.get_node_by_name(db_node.name)\r\n graph.add_edge(self, neighbor, 1, False)", "def add_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.add_edge(i, j)", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def edges(self) -> EdgeList:\r\n return self._edges", "def generate_edgelist(H, delimiter=\" \"):\n for id in H.edges:\n e = H.edges.members(id)\n yield delimiter.join(map(str, e))", "def _from_gisdb(self):\n self._ways = gpd.read_postgis(sql=\"ways\", con=self._gisdb, geom_col=\"geometry\")\n self._nodes = pd.read_sql(sql=\"nodes\", con=self._gisdb)\n self._edges = pd.read_sql(sql=\"graph_edges\", con=self._gisdb)\n # graph_nodes = gpd.read_postgis(sql=\"graph_nodes\", con=self._gisdb, geom_col=\"geometry\")", "def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")", "def addEdges(self, edges):\n for edge in edges:\n self.addEdge(edge[0], edge[1], edge[2])", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def getEdges(self):\n edgeList = []\n for v in self.adjList:\n for i in range(len(self.adjList[v])):\n edgeList.append((v, self.adjList[v][i]))\n return edgeList", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def contribs2edges():\r\n client = mongo.MongoClient(config[\"MONGO_URI\"])\r\n db = client.links\r\n db.edges.remove()\r\n edges = dict()\r\n for contrib in db.contribs.find():\r\n for item in contrib[\"data\"]:\r\n id = u\"{} {}\".format(item[\"name_1\"], item[\"name_2\"]).replace(\" \", \"_\")\r\n edge = edges.get(id) #db.edges.find_one({\"_id\" : id}))\r\n if not edge:\r\n edge = {\"_id\" : id, \"name_1\" : item[\"name_1\"], \"name_2\" : item[\"name_2\"], \"tags\" : []}\r\n for tag in item[\"tags\"]:\r\n edge_tag = filter(lambda x: x[\"name\"] == tag, edge[\"tags\"])\r\n if len(edge_tag):\r\n edge_tag = edge_tag[0]\r\n else:\r\n edge_tag = {\"name\" : tag, \"urls\" : []}\r\n edge[\"tags\"].append(edge_tag)\r\n if item[\"url\"] not in edge_tag[\"urls\"]:\r\n 
edge_tag[\"urls\"].append(item[\"url\"])\r\n if id not in edges:\r\n edges[id] = edge\r\n db.edges.insert(edges.values())", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def edges_list(self, edges_list):\n for e in edges_list:\n exceptions.check_pertinent_edge(self, e)\n self._edges_list = edges_list", "def edges(self):\n for e in self._edges:\n yield e", "def edge_mapping(self):\n ..." ]
[ "0.6576329", "0.6504471", "0.6298686", "0.6290396", "0.6286582", "0.61591196", "0.6077323", "0.60347456", "0.5996814", "0.59027344", "0.5868906", "0.5867808", "0.58494437", "0.5840155", "0.5787023", "0.5771977", "0.5738752", "0.5715439", "0.5699809", "0.56931144", "0.5678498", "0.567053", "0.5639138", "0.56381476", "0.5627551", "0.5593458", "0.558789", "0.5575747", "0.5545143", "0.5528601" ]
0.72010344
0
Populate a list with Vertex objects, mapped from db.
def populate_vertices(self, vertices_list): vertices = [] for vertex in vertices_list: vertex_id = vertex[0] vertices.append(Vertex(vertex_id)) self.vertices = vertices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_vertices(self):\n self.vertices = []\n for key in self.graph:\n self.vertices.append(self.Vertex(key, self.graph[key]))", "def fetch_from_sqlite(self):\n conn = get_sqlite()\n c = conn.cursor()\n c.execute('SELECT * FROM vertices ORDER BY id')\n vertices =c.fetchall()\n c.execute('SELECT * FROM edges')\n edges =c.fetchall()\n conn.commit()\n\n self.graph.add_vertices(len(vertices))\n for one in vertices:\n id =int(one[0])\n self.graph.vs[id][\"name\"] = one[1]\n self.graph.vs[id][\"parent\"] = one[2]\n self.graph.vs[id][\"size\"] = one[3]\n self.graph.vs[id][\"last_modified\"] = one[4]\n self.graph.vs[id][\"last_accessed\"] = one[5]\n\n for one in edges:\n self.graph.add_edges([(one[0],one[1])])", "def _create_vertex_list(self):\n raise NotImplementedError('_create_vertex_list must be defined in '\n 'order to use group or batch properties')", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def add_vertices(self, vertices_list):\n for vertex in vertices_list:\n self.add_vertex(vertex)", "def initialize_vertices(self, objects_: Union[Dict[str, List[Union[str, int]]], NamedTuple],\n root_name: str, independent: bool, group: int,\n vertices_list: list = None) -> list:\n if vertices_list is None:\n vertices_list = []\n if not isinstance(vertices_list, list):\n raise TypeError(f\"Expected List, got {type(vertices_list)}\")\n\n group_store = {}\n for key, values in objects_.items():\n if key == root_name:\n if type(values) == int or type(values) == str:\n node = Graph_Node(subtype_name=key, value=values,\n independent=True, level=\"root\")\n node.group.add(group)\n group_store[key] = node\n\n # add root nodes to the class node list\n self._add_node(node)\n else:\n raise TypeError(\"Expected value of 'root_name' key to be a str or int\")\n else:\n node = Graph_Node(subtype_name=key, value=values,\n independent=independent)\n node.group.add(group)\n group_store[key] = node\n vertices_list.append(group_store)\n\n return vertices_list", "def initialize_graph(self, V, edge_list):\n # ---------- INSERT CODE BELOW ----------\n for _ in range(V):\n self.add_vertex()\n \n for node in edge_list:\n self.add_edge(node[0],node[1],node[2])\n\n # ---------- INSERT CODE ABOVE ----------", "def _load_vertices_from_path(self, path):\n logging.info(\"Loading vertices from %r\", path)\n sorted_to_import = sorted(\n _search_for_vertex_id(path),\n key=lambda x: x[0]\n )\n\n for ident, label, prop_file in sorted_to_import:\n properties = json.load(open(prop_file)) if prop_file else {}\n\n # reset the id to the id being loaded.\n self._id_tracker.vid = ident\n vertex = super(PersistentGraph, self).add_vertex(\n label, **properties\n )\n # due to pylint bug https://github.com/PyCQA/pylint/issues/379, we\n # need to disable assigning-non-slot errors\n vertex.path = os.path.join(path, label, str(ident)) # pylint: disable=assigning-non-slot", "def obtener_vertices(self):\n return list(self.vertices.keys())", "def vertex_generator(self):\n for V in self.Vrepresentation():\n if V.is_vertex():\n yield V", "def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def loadallvertices(self):\n if self.filedb is None:\n return\n vertices = dict()\n line_pattern = r\"[A-Z]{3},[A-Z]{3},[\\d]+$\"\n try:\n with open(self.filedb) as f:\n for line in f:\n # Recover origin, destiny and cost\n if bool(re.match(line_pattern, line)):\n start, finish, cost = 
line.rstrip('\\n\\r').split(\",\")\n # Create route entry\n route = {finish: int(cost)}\n origin_dict = vertices.get(start)\n if origin_dict is not None:\n origin_dict.update(route)\n vertices[start] = origin_dict\n else:\n vertices[start] = route\n\n with open(self.filedb) as f:\n for line2 in f:\n if bool(re.match(line_pattern, line2)):\n # Recover origin, destiny and cost\n start, finish, cost = line2.rstrip('\\n\\r').split(\",\")\n # Finish must be a vertice also\n if vertices.get(finish) is None:\n vertices[finish] = {finish: 0}\n\n except Exception as e:\n logging.error(\"File open error.\" + str(e))\n return None\n\n return vertices", "def __init__(self):\n self.vert_list = {}\n self.num_vertices = 0", "def __init__(self, vertex):\n self.id = vertex\n self.neighbors = {}", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def populate_graph(self):", "def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")", "def get_vertices(self) -> []:\n return [i for i in self.adj_list]", "def __init__(self, vertices):\n self.vertices = vertices", "def _from_db_object_list(db_objects, cls, context):\n return [NodeGroup._from_db_object(cls(context), obj)\n for obj in db_objects]", "def vertices(self):\n return self.pointlist", "def __init__(self,vertices):\n self._vertices = vertices\n self._edges = []\n for i in range(len(self._vertices)-1)\n self._edges.append( [i,i+1] )", "def _create_nodes_from_vertices(self, vertices: List[np.ndarray]) -> List[str]:\n nodes = []\n for vertice in vertices:\n lon, lat = self.proj(vertice[0], vertice[1], inverse=True)\n node = Node(self.id_count, lat, lon)\n nodes.append(node.id_)\n self.osm.add_node(node)\n return nodes", "def list_vertices(self):\n return list(self.graph_dict.keys())", "def __init__(self, vertex, data=None):\n self.id = vertex\n self.data = data\n self.neighbors = {}", "def vertices(self):\n return list(self._graph)", "def _from_gisdb(self):\n self._ways = gpd.read_postgis(sql=\"ways\", con=self._gisdb, geom_col=\"geometry\")\n self._nodes = pd.read_sql(sql=\"nodes\", con=self._gisdb)\n self._edges = pd.read_sql(sql=\"graph_edges\", con=self._gisdb)\n # graph_nodes = gpd.read_postgis(sql=\"graph_nodes\", con=self._gisdb, geom_col=\"geometry\")", "def __init__(self, vertices=[], edges=[]):\n for vertex in vertices:\n self.add_vertex(vertex)\n for edge in edges:\n self.add_edge(edge)", "def _from_db_object_list(db_objects, cls, context):\n return [Boar._from_db_object(cls(context), obj)\n for obj in db_objects]", "def add_vertices(self, vertices: Iterable[\"Vertex\"]) -> Sequence[int]:\n indices = []\n precision = self.precision\n for vertex in vertices:\n vertex = Vec3(vertex)\n key = vertex.round(precision) # type: ignore\n try:\n index, count = self.ledger[key]\n except KeyError: # new key\n index = len(self.vertices)\n self.vertices.append(vertex)\n self.ledger[key] = (index, 1)\n else: # update key entry\n # calculate new average location\n average = (self.vertices[index] * count) + vertex\n count += 1\n # update vertex location\n self.vertices[index] = average / count\n # update ledger\n self.ledger[key] = (index, count)\n indices.append(index)\n return tuple(indices)" ]
[ "0.71661055", "0.6847688", "0.66699135", "0.6233327", "0.60095", "0.59055865", "0.58005416", "0.57749695", "0.57571894", "0.572714", "0.571261", "0.5698523", "0.56625485", "0.5659618", "0.565549", "0.56113976", "0.55910236", "0.558864", "0.5581662", "0.5579575", "0.5548817", "0.5542447", "0.55143034", "0.5510657", "0.5507221", "0.5503638", "0.54694855", "0.54644716", "0.5448731", "0.54349583" ]
0.77077585
0
Sets a series of parameters of the experiment setup or document according to a dictionary. If an experiment is currently running, the update is attached as pending and applied once the experiment stops running, via the server callback for the experiment-completed event.
def set_param(self,set_dict): if self.query_running(): self.params_pending = True self.pending_params = set_dict return "Pending" for param in set_dict: root={"EXP":self.app,"DM":self.appdoc}[param.split("_")[0]] root.SetParam(win32com.client.constants.__dicts__[0][param],set_dict[param]) rootd={"EXP":self.app_param,"DM":self.appdoc_param}[param.split("_")[0]] rootd.update({param:root.GetParam(win32com.client.constants.__dicts__[0][param])[0]}) return "Updated"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def update(self, methodName=None, elementName=None, args=None, kwargs=None):\n #if methodName != 'run':\n # return\n ##print methodName\n ##from dbgp.client import brk; brk(port=9011)\n #self.iteration = self.experiment.position.iter\n #exp = self.experiment\n ## check if the pause button was clicked\n #if self.pause:\n # exp.pause = True\n #elif self.runCount is not None:\n # self.runCount -= 1\n # if self.runCount == 0:\n # exp.pause = True\n #\n #runtimelistener.listenersEnabled = exp.pause", "def update_parameters(updates):\r\n for (key, val) in updates.items():\r\n par[key] = val\r\n print('Updating:', key, '-->', val)\r\n update_dependencies()", "def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def gather_experiment_parameters(self):\n consts = win32com.client.constants.__dicts__[0]\n exp_params = [r for r in consts.keys() if len(r.split(\"EXP_\")) > 1]\n dm_params = [r for r in consts.keys() if len(r.split(\"DM_\")) > 1]\n self.app_param = {} \n self.appdoc_param = {} \n for p in exp_params:\n self.app_param.update({p:self.app.GetParam(consts[p])})\n\n for p in dm_params:\n #self.appdoc_param.update({p:self.app.GetParam(consts[p])}) bug? call appdoc? CP\n\n self.appdoc_param.update({p:self.app.GetParam(consts[p])})", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. 
If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def update_params(self):\n pass", "def updateParameters(self):\n\n return", "def update(self, **params):\n self.parameters.update(params)", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def emb_experiment():\n print(\"EMBEDDINGS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'emb_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'use_word_emb'\n changing_param_value = [0, 1]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def _update_params(self):\n pass", "def updateParameters(self, parameters):", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def _update_params(self):\n _load = not self.san_interface.runmode\n params={}\n if ('iosched' in self._updatedattr or _load) and self.iosched<>IoSchedType.default:\n params['iosched']=str(self.iosched)\n if ('readahead' in self._updatedattr or _load) and self.readahead :\n params['readahead']=self.readahead\n if params:\n for pt in self.paths():\n pt.provider.set_dev_params(pt,params)", "def setExperiment(self, **kwargs):\n # If the dictionary robot value is 'tb1' then change the button Style\n global robot_Selected_Value\n if kwargs['robot'] =='1':\n robot_Selected_Value = 'TB1'\n elif kwargs['robot'] =='2':\n robot_Selected_Value = 'TB2'\n elif kwargs['robot'] =='3':\n robot_Selected_Value = 'TB3'\n elif kwargs['robot'] =='4':\n robot_Selected_Value = 'TB4'\n elif kwargs['set'] =='OK':\n # CONFIGURATION VARIABLES\n robot_Type_Value = self.robot_Selection_Type.currentText()\n robot_Role_Value = self.robot_Selection_Role.currentText()\n robot_Task_Value = 
self.robot_Selection_Task.currentText()\n robot_Behavior_Value = self.robot_Selection_Behavior.currentText()\n robot_Experiment_Value = self.robot_Selection_Experiment.currentText()\n # XML CREATION\n environmentXMLFile = et.Element('EXP_CONFIGURATIONS')\n comment = et.Comment(\"Experiment Configuration and Variables\")\n environmentXMLFile.append(comment)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_SELECTED')\n environmentConfig.text = str(robot_Selected_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_TYPE')\n environmentConfig.text = str(robot_Type_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_ROLE')\n environmentConfig.text = str(robot_Role_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_TASK')\n environmentConfig.text = str(robot_Task_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_BEHAVIOR')\n environmentConfig.text = str(robot_Behavior_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_EXPERIMENT')\n environmentConfig.text = str(robot_Experiment_Value)\n try:\n tree = et.ElementTree(environmentXMLFile)\n tree.write('experimentConfig.xml', encoding='utf8')\n sendFiles.sshSendFiles()\n operationSucess()\n except Exception:\n operationError()", "def _set_params(self, *args, **kwargs):\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n self._verify_not_readonly(*args, **kwargs)\n update_params = False\n\n # check values that the instrument doesn't validate\n # handle special cases for driver specific parameters\n for (key, val) in params.iteritems():\n if key == Parameter.PUMP_DELAY and (val < MIN_PUMP_DELAY or val > MAX_PUMP_DELAY):\n raise InstrumentParameterException(\"pump delay out of range\")\n elif key == Parameter.NUM_AVG_SAMPLES and (val < MIN_AVG_SAMPLES or val > MAX_AVG_SAMPLES):\n raise InstrumentParameterException(\"num average samples out of range\")\n\n for (key, val) in params.iteritems():\n\n old_val = self._param_dict.format(key)\n new_val = self._param_dict.format(key, val)\n log.debug(\"KEY = %r OLD VALUE = %r NEW VALUE = %r\", key, old_val, new_val)\n\n if old_val != new_val:\n update_params = True\n if ConfirmedParameter.has(key):\n # We add a write delay here because this command has to be sent\n # twice, the write delay allows it to process the first command\n # before it receives the beginning of the second.\n self._do_cmd_resp(Command.SET, key, val, write_delay=0.2)\n else:\n self._do_cmd_resp(Command.SET, key, val, **kwargs)\n\n log.debug(\"set complete, update params\")\n self._update_params()\n if update_params:\n self._update_params()", "def update(self, params):", "def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass", "def update_parameters(mode = str(None), estimator_name = str(None), **kwargs):\n try:\n json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n with open(json_p,'r',encoding='utf-8') as d_file:\n para = json.load(d_file)\n print(f\"Previous Parameters are: {para[mode][estimator_name]}\")\n para[mode][estimator_name] = kwargs\n print(f\"Current Parameters are updated as: {para[mode][estimator_name]}\")\n 
json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n w_file = open(json_p, \"w\",encoding='utf-8')\n json.dump(para, w_file)\n w_file.close()\n print('Done with the parameters update.')\n except:\n print('Failed to update the parameters.')", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def update(self, **kwargs):\n for k, v in kwargs.items():\n if k not in VALID_CONFIG_KEYS:\n cprint(\"war\", f\"'{k}' is not a valid key, skipping...\")\n continue\n\n if v:\n v = self._validate_option(k, v)\n self.data[k] = v", "def _set_params(self, *args, **kwargs):\n\n params = args[0]\n\n # check for attempt to set readonly parameters (read-only or immutable set outside startup)\n self._verify_not_readonly(*args, **kwargs)\n old_config = self._param_dict.get_config()\n\n for (key, val) in params.iteritems():\n log.debug(\"KEY = \" + str(key) + \" VALUE = \" + str(val))\n self._param_dict.set_value(key, val)\n\n new_config = self._param_dict.get_config()\n # check for parameter change\n if not dict_equal(old_config, new_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def _update_params(self):\n raise NotImplementedException()", "def test_update(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n cand = LAss.get_next_candidate(name)\n cand.result = 1\n LAss.update(name, cand)\n assert_items_equal(LAss.exp_assistants[name].experiment.candidates_finished, [cand])\n assert_equal(LAss.exp_assistants[name].experiment.candidates_finished[0].result, 1)", "def update_workflow_params(**kwargs) -> None:\n workflow_params_file = upsearch(WORKFLOW_PARAMS_FILENAME)\n workflow_params = load_workflow_params()\n for k, v in kwargs.items():\n if k in workflow_params:\n workflow_params[k] = v\n\n with workflow_params_file.open(\"w\") as f:\n f.write(json.dumps(workflow_params, indent=4))", "def update_parameters(self,like_params):\n\n # get current dictionary with parameters, update and setup again\n params=self.get_params()\n\n for par in like_params:\n if par.name in params:\n params[par.name]=par.value\n\n self._setup_from_parameters(params)\n return", "def set_params(self, 
params):" ]
[ "0.62047803", "0.5832001", "0.5816899", "0.5796853", "0.57953626", "0.57526183", "0.57322884", "0.5712759", "0.5709553", "0.56523895", "0.55731404", "0.555652", "0.5542452", "0.54959196", "0.54886276", "0.5480578", "0.54759264", "0.54615855", "0.54320467", "0.5375757", "0.53588575", "0.5347291", "0.53269404", "0.53262365", "0.5313532", "0.5299984", "0.52857924", "0.5265989", "0.5252627", "0.5242264" ]
0.6349464
0
Sets the camera autoframing mode; if true, the camera immediately begins collecting the next frame after each one arrives
def set_autoframing(self,value=True): self.autoframe=value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def live():\n m = camera.status.mode\n print \"Hit ^C to exit.\"\n print \"NOTE! After using this command, type: mode('%s') \" % m\n mode('centre')\n try:\n while True:\n f = camera.GetFits()\n camera.status.update()\n setheaders(f)\n camera.status.lastact = time.time() #Record the time that the last image was taken\n xpa.displayimage(f)\n except KeyboardInterrupt:\n logger.error(\"Live mode aborted, dumping image.\")\n finally:\n mode(m) #Restore original camera mode (hopefully)", "def warmup():\n print camera.CoolerOFF()\n camera.status.update()", "def _automatic_rendering(self) -> None:\n if self.viewer is not None and self.enable_auto_render:\n\n if self._record_video_wrapper and self._record_video_wrapper.video_recorder:\n self._record_video_wrapper.video_recorder.capture_frame()\n else:\n self.render()", "def initialCamera(self, cmd):\n\n pass", "def set_manual_mode(self):\n self._kernel.set_manual_mode()", "def run_single_camera(cam):\n\n try:\n # Retrieve TL device nodemap and print device information\n #nodemap_tldevice = cam.GetTLDeviceNodeMap()\n\n #result &= print_device_info(nodemap_tldevice)\n\n # Initialize camera\n cam.Init()\n\n # Retrieve GenICam nodemap\n nodemap = cam.GetNodeMap()\n exposures=[2000,4000,8000,16000]\n index=0\n if cam.ExposureAuto.GetAccessMode() != PySpin.RW:\n print(\"Unable to disable automatic exposure. Aborting...\")\n return False\n node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode(\"AcquisitionMode\"))\n if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):\n print(\"Unable to set acquisition mode to continuous (enum retrieval). Aborting...\")\n return False\n\n # Retrieve entry node from enumeration node\n node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(\"Continuous\")\n if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):\n print(\"Unable to set acquisition mode to continuous (entry retrieval). Aborting...\")\n return False\n\n acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()\n\n node_acquisition_mode.SetIntValue(acquisition_mode_continuous)\n\n print(\"Acquisition mode set to continuous...\")\n\n cam.ExposureAuto.SetValue(PySpin.ExposureAuto_Off)\n '''\n # Set maximum width\n #\n # *** NOTES ***\n # Other nodes, such as those corresponding to image width and height,\n # might have an increment other than 1. In these cases, it can be\n # important to check that the desired value is a multiple of the\n # increment.\n #\n # This is often the case for width and height nodes. However, because\n # these nodes are being set to their maximums, there is no real reason\n # to check against the increment.\n if cam.Width.GetAccessMode() == PySpin.RW and cam.Width.GetInc() != 0 and cam.Width.GetMax != 0:\n cam.Width.SetValue(FRAME_WIDTH)\n print(\"Width set to %i...\" % cam.Width.GetValue())\n\n else:\n print(\"Width not available...\")\n result = False\n\n # Set maximum height\n #\n # *** NOTES ***\n # A maximum is retrieved with the method GetMax(). 
A node's minimum and\n # maximum should always be a multiple of its increment.\n if cam.Height.GetAccessMode() == PySpin.RW and cam.Height.GetInc() != 0 and cam.Height.GetMax != 0:\n cam.Height.SetValue(FRAME_HEIGHT)\n print(\"Height set to %i...\" % cam.Height.GetValue())\n\n else:\n print(\"Height not available...\")\n result = False\n '''\n print(\"Automatic exposure disabled...\")\n #node_acquisition_framerate = PySpin.CFloatPtr(nodemap.GetNode(\"AcquisitionFrameRate\"))\n\n # if not PySpin.IsAvailable(node_acquisition_framerate) and not PySpin.IsReadable(node_acquisition_framerate):\n # print(\"Unable to retrieve frame rate. Aborting...\")\n # return False\n\n # framerate_to_set = node_acquisition_framerate.GetValue()\n\n # print(\"Frame rate to be set to %d...\" % framerate_to_set)\n canvas=np.zeros((FRAME_HEIGHT*2,FRAME_WIDTH*2,3), np.uint8)\n while True:\n exposure=exposures[index]\n \n configure_exposure(cam, exposure)\n # Acquire images\n err, img,width,height = acquire_images(cam, nodemap)\n if err < 0:\n return err\n\n \n img = img.GetData().reshape(height,width,3)\n\n half_height = int(height/2)\n half_width = int(width/2)\n half_frame_height = int(FRAME_HEIGHT/2)\n half_frame_width = int(FRAME_WIDTH/2)\n \n img = img[half_height-half_frame_height:half_height+half_frame_height,half_width-half_frame_width:half_width+half_frame_width]\n #smallimg=cv2.resize(img,(int(FRAME_WIDTH/2),int(FRAME_HEIGHT/2)))\n if index==0:\n #top left\n canvas[0:FRAME_HEIGHT,0:FRAME_WIDTH]=img\n elif index==1:\n #top right\n canvas[0:FRAME_HEIGHT,FRAME_WIDTH:FRAME_WIDTH*2]=img\n elif index==2:\n #bot left\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,0:FRAME_WIDTH]=img\n else:\n #bot right\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,FRAME_WIDTH:FRAME_WIDTH*2]=img\n index+=1\n if index>=len(exposures):\n index=0\n\n cv2.imshow(\"frame\",canvas)\n if cv2.waitKey(1) &0xff ==ord('q'):\n #stop the feed the 'q'\n break\n cv2.destroyAllWindows()\n # Deinitialize camera\n cam.DeInit()\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False", "def setupCamera(self):\n\t\tself.eye = self.vr.newEye(\"test_cam\")\n\t\tself.eye.reposition(0.0, 1.0, 0.5, 0.0, 0.0, 0.0)\n\t\tself.eye.setFOV(self.config.camFOV)\n\t\n\t\tself.video.clear(\"black\")\n\t\tself.video.show(self.eye, 0, 0)", "def enable_motion_detection(self):\n ret = self._nvr.set_camera_recording(self._uuid, \"motion\")\n if not ret:\n return\n\n self._motion_status = \"motion\"\n self._isrecording = True\n _LOGGER.debug(\"Motion Detection Enabled for Camera: %s\", self._name)", "def update_frame(self):\n if self.should_reset_camera:\n self.ren.ResetCamera()\n self.should_reset_camera = False\n self.interactor.Render()\n app.processEvents()", "def update_frame(self):\n if self.should_reset_camera:\n self.ren.ResetCamera()\n self.should_reset_camera = False\n self.interactor.Render()\n app.processEvents()", "def apply_settings(camera):\r\n camera.clear_mode = 0\r\n camera.exp_mode = \"Internal Trigger\"\r\n camera.readout_port = 0\r\n camera.speed_table_index = 0\r\n camera.gain = 1", "def cameraOn():\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly by DirectShow\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow(\" Real-Time Video. 
Press 'q' to exist.\",frame)\n if cv2.waitKey(8) & 0xFF == ord('q'): #display a frame for 8ms, ~120Hz\n break\n \n cap.release() # release the capture\n cv2.destroyAllWindows()", "def _update_camera(self, render=False):\n self._renderer.set_camera(\n # needs fix, distance moves when focal point updates\n distance=self._renderer.plotter.camera.distance * 0.9,\n focalpoint=tuple(self._ras),\n reset_camera=False)", "def start_free_run(self):\n if self.free_run_running:\n self.logger.info(f'Trying to start again the free acquisition of camera {self}')\n return\n self.logger.info(f'Starting a free run acquisition of camera {self}')\n self.free_run_running = True\n self.logger.debug('First frame of a free_run')\n self.acquisition_mode = self.MODE_CONTINUOUS\n self.trigger_camera() # Triggers the camera only once", "def setMode(self, mode):\n if mode == 0 or mode == 1:\n with self.lock:\n self.mode = mode\n else:\n raise FliError(\"FLISetCameraMode failed\")", "def __reset_camera(self):\n # Reset Camera\n self.scene.camera.pos = vector(5, 5, 12) # Hover above (5, 5, 0)\n # Ever so slightly off focus, to ensure grid is rendered in the right\n # region\n # (if directly at, draws numbers wrong spots)\n # Focus on (5, 5, 0)\n self.scene.camera.axis = vector(-0.001, -0.001, -12)\n self.scene.up = y_axis_vector", "def start_realsense_camera():\n global realsense_enabled, camera, write_bag\n\n if not realsense_enabled:\n realsense_enabled = True\n\n write_bag_path = None\n if write_bag:\n ###\n filename = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n write_bag_path = \"realsense_files/\" + filename + \".bag\"\n\n camera = depth_cam(width=width, height=height, channels=channels,\n enable_rgb=enable_rgb, enable_depth=enable_depth, enable_imu=enable_imu, record_bag=write_bag_path, read_bag=None)\n\n return render_settings_view()", "def set_active(self) -> None:\n self.map.active_cam = self.map.cameras.index(self) + 1", "def camera_set(self) -> bool:\n if self.camera is None: # pragma: no cover\n return False\n return self.camera.is_set", "def setAutoDownsample(self, active=True):\n self.autoDownsample = active\n self._renderRequired = True\n self.update()", "def set_automatic(self, mode):\n self.slam.controlled = not mode\n if mode:\n self.slam.resume()", "def on_run(self):\n self.set_illumination({'mode': 'breathe'})", "def start(self):\n self.ids.camera.opacity=1\n self.ids.camera.play=True\n self.ids.camera_button.text=\"Stop Camera\"\n self.ids.camera.texture = self.ids.camera._camera.texture", "def start(self):\n self.ids.camera.opacity = 1\n self.ids.camera.play = True\n self.ids.start.text = 'Stop Camera'\n self.ids.camera.texture = self.ids.camera._camera.texture", "def start():\n global running\n running = True\n messagebox.showinfo(\"Camera mode\",\"Start image grab\")\n camera.start_preview(fullscreen=False, window = (100,20,612,404))", "def reset_limits(self):\n self.autoscale = True\n self.camera.autoscale()", "def turn_on(self, **kwargs):\n _LOGGER.debug(\"Turning on Motion Detection \")\n self.data.set_camera_recording(self._camera_id, \"motion\")", "def _initialize(self):\n if not self._is_initialized:\n self.connect(retries=Camera.CONNECTION_RETRIES)\n self.cam.resolution = (self.resolution['x'], self.resolution['y'])\n self.cam.start_preview()\n time.sleep(2)\n self._is_initialized = True", "def setupCamera(self) 
:\n\t\tbase.disableMouse()\n\t\tbase.camera.setPos(self.avatarNP.getPos())\n\t\tbase.camera.setZ(self.avatarNP.getZ()+1.5)\n\t\tbase.camera.setHpr(self.avatarNP.getHpr()[0],0,0)\t\t\n\t\tself.fieldAngle = 46.8\t# similar to human eye;\n\t\t\t\t\t# change this to zoom in/out\n\t\tbase.camLens.setFov(self.fieldAngle)", "def run(self):\n\n last_mean = 0\n st = time.time()\n sframe = 0\n while True:\n if time.time()-1 > st:\n st = time.time()\n #print 'fps', self.frame_counter - sframe\n sframe = self.frame_counter\n\n self.frame_counter += 1\n frame = next(self.frame_generator)\n\n xMax = frame.shape[1]\n yMax = frame.shape[0]\n\n capture_area = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n mean, stddev = cv2.meanStdDev(capture_area)\n mean = mean[0][0]\n stddev = stddev[0][0]\n\n if abs(mean-last_mean) > ACTIVATE_MEAN_DIFF:\n self.wakeup()\n\n last_mean = mean\n\n faces = []\n if abs(self.frame_counter - self.last_face_frame) < 20 or self.frame_counter % 5 == 0:\n faces = faceCascade.detectMultiScale(\n capture_area,\n scaleFactor=1.1,\n minNeighbors=MIN_NEIGHBOURS,\n minSize=(30, 30)\n )\n\n if len(faces) == 1:\n self.last_face_frame = self.frame_counter\n face = faces[0]\n x, y, w, h = face\n\n x1 = x\n x2 = x+w\n y1 = y\n y2 = y+h\n\n # expand_area\n width_plus = int(w/4.0)\n height_plus = int(h/4.0)\n x1 -= width_plus\n x2 += width_plus\n y1 -= height_plus\n y2 += height_plus\n\n y_max, x_max = frame.shape[:2]\n\n x1 = max(0, x1)\n y1 = max(0, y1)\n x2 = min(x_max, x2)\n y2 = min(y_max, y2)\n\n colour_face = frame[y1:y2, x1:x2]\n colour_face = np.copy(colour_face)\n\n face_obj = Face(face, colour_face, self.frame_counter)\n self.capture_face(face_obj)\n\n #st = time.time()\n bm = get_best_match(colour_face)\n match_person = bm\n if match_person is not None:\n self.found_people[match_person] += 1\n\n\n #et = time.time()\n #print et-st\n #result = self.pool.apply_async(get_best_match, (colour_face,))\n #self.pool_results.append(result)\n\n if len(self.pool_results) > 0:\n print(len(self.pool_results))\n res = self.pool_results[0]\n try:\n match_person = res.get()\n print('match here', match_person)\n except TimeoutError:\n pass\n else:\n self.pool_results.popleft()\n if match_person is not None:\n self.found_people[match_person] += 1\n\n # do flush if we have enough frames\n if len(self.capture_buffer) >= FRAMES_COUNT_TO_SAVE:\n self.flush_capture_buffer()\n\n # clear buffer if we never got enough frames\n if len(self.capture_buffer) > 0:\n if self.frame_counter - self.capture_buffer[-1].frame_counter > MAX_FRAMES_BETWEEN_CAPTURES:\n self.flush_capture_buffer()\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), DRAWING_COLOR, 15)\n\n # Display the resulting frame\n frame = cv2.flip(frame, flipCode=1)\n\n if self.draw_wanted_start_frame > self.frame_counter - TEXT_DISPLAY_TIME:\n cv2.putText(frame, \"Thanks!\", (150,250), cv2.FONT_HERSHEY_DUPLEX, 8.0, DRAWING_COLOR, 14)\n if self.thank_person is not None:\n cv2.putText(frame, self.thank_person, (150,450), cv2.FONT_HERSHEY_DUPLEX, 6.0, DRAWING_COLOR, 12)\n\n # When the screen goes off, we hang on waitKey, so don't do it if we haven't done a wakeup recently\n # Also no point in updating the screen if it is off.\n if self.last_wakeup + 40 > time.time():\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Display the resulting frame\n cv2.imshow('Video', frame)" ]
[ "0.6175358", "0.5965215", "0.59311265", "0.5922155", "0.59183586", "0.5905513", "0.58601236", "0.58371323", "0.57451624", "0.57451624", "0.5739937", "0.5731639", "0.5720027", "0.5715713", "0.57007986", "0.5698071", "0.56938946", "0.56901175", "0.568824", "0.567828", "0.5668664", "0.56613487", "0.56589884", "0.565817", "0.5646619", "0.56246495", "0.56158054", "0.5605285", "0.5584567", "0.557702" ]
0.64564735
0
Method that polls for frame completion. A local variable stores the result of the last poll, indicating whether an experiment was in progress; if the new poll indicates it is not, returns True
def _server_poll_expcompleted_(self): #print "class Princeton_CCD function _server_poll_expcompleted_" try: last_state = self.polled_running except (AttributeError,UnboundLocalError): self.polled_running = False last_state = False self.polled_running = self.query_running() if (not bool(last_state) and bool(self.polled_running)): self.begin_acq_time = time.time() #print self.query_running(), last_state #if ((last_state == True) and (self.polled_running == False)): CP if (bool(last_state) and not bool(self.polled_running)): self.end_acq_time = time.time() return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def poll(self):\n return False", "def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0", "def is_call_waiting(self) -> bool:", "def __bool__(self):\n return self.wait(0)", "def complete(self):\r\n\tif self.launch_time == INVALID_TIME:\r\n\t print \"Missing probe launch time\"\r\n return False\r\n if self.received_time == INVALID_TIME:\r\n print \"Missing probe received time\"\r\n return False\r\n if self.completion_time == INVALID_TIME:\r\n print \"Missing probe completion time\"\r\n return False\r\n return True", "def detect_completion(self):\n results_dir = glob.glob(f\"{self.production.rundir}\")\n if len(results_dir)>0: # dynesty_merge_result.json\n if len(glob.glob(os.path.join(results_dir[0], f\"extrinsic_posterior_samples.dat\"))) > 0:\n return True\n else:\n return False\n else:\n return False", "def should_poll(self):\r\n return False", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def has_finished():", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False" ]
[ "0.63351786", "0.6147131", "0.60807467", "0.601559", "0.5987755", "0.5970426", "0.5962104", "0.59266925", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58878636", "0.58640796", "0.5848727", "0.5848727", "0.5848727", "0.5848727", "0.5848727", "0.5848727" ]
0.69292897
0
Endpoint for validating if a posted string is a pangram
def check_string(): # Forcing check for valid json and headers with Content-Type:application/json content = request.get_json(silent=False, force=True) payload = content.get('data', None) if not payload: return response_handler( {"error": "'data' key missing from JSON payload."}, 400 ) if not isinstance(payload, basestring): return response_handler( {"error": "Value of 'data' key is not of type 'string'."}, 400 ) pangram = analyze_string(payload) if not pangram: return response_handler( {"error": False}, 400 ) return response_handler( {"success": True}, 200 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_pangram(string):\n a_pos = ord('a')\n letters = [0] * 26\n for char in string:\n if char.isalpha():\n letters[ord(char.lower()) - a_pos] += 1\n return all(letters)", "def post(self):\n data = request.json\n return check_spelling(data)", "def verify_text(self, text):\n pass", "def verificar_pangrama(cadena):\n for i in range(len(ascii_lowercase)):\n if ascii_lowercase[i] in cadena.lower():\n continue\n else:\n return False\n return True", "def is_pangram(sentence):\n\n result = set()\n \n for char in sentence:\n\n if char.lower().isalpha():\n\n result.add(char.lower())\n\n\n if len(result) == 26:\n\n return True\n\n else:\n\n return False", "def is_valid_ngram(ngram):\n for char in ngram:\n if char not in LETTERS:\n return False\n\n return True", "def is_pangram(sentence):\n\n alpha = set()\n is_alpha = False\n\n for character in sentence:\n alpha.add(character)\n\n if len(alpha) == 26:\n is_alpha = True\n\n return is_alpha", "def password_validator(password):\n if list(PUNCTUATIONS) in password:\n \"\"\"\n >>> list(string.punctuation)\n ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.',\n '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`',\n '{', '|', '}', '~']\n >>>\n \"\"\"\n return False\n else:\n return True", "def is_well_formed_gtp_word(s):\n if not isinstance(s, str):\n return False\n if not _gtp_word_characters_re.search(s):\n return False\n return True", "def check_input(input_string):\n if len(input_string) > 50: # check if length of name is less than 50 ir not\n return False\n else:\n return bool(re.match('[a-zA-Z\\s]+$', input_string)) # check is input contains only chars and spaces", "def test_sentence_input(self, sentence):\n if len(sentence.strip()) == 0:\n return False\n # Decode unicode, mainly to normalize fancy quotation marks\n decoded = unidecode(sentence)\n # Sentence shouldn't contain problematic characters\n if self.well_formed and self.reject_pat.search(decoded):\n return False\n return True", "def is_simple (self, phrase):\r\n\r\n return not self.contains(phrase,'()&|>#')", "def test_specialchar(self):\n form_data = self.form_data('vNzwXpzKJyTshvHsuULn')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True", "def is_palindromic(phrase):\n\n val = str(phrase).lower().replace(\" \", \"\")\n if val == val[::-1]: # Reverse order\n return True\n else:\n return False", "def is_palindrome_ingoring_case_and_non_letter_chars(text):", "def test_lowercase(self):\n form_data = self.form_data('A=R7-%=K?K@B^!9Q8=C+')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def _validate_string(self, path, value, value_is_key=False):\r\n value = re.sub('[/$#{}._|*=\\-]', ' ', value)\r\n\r\n tokens = nltk.tokenize.word_tokenize(value)\r\n for raw_token in tokens:\r\n if raw_token.startswith(\"'\"):\r\n raw_token = raw_token[1:]\r\n if self.corpus.validate_token(raw_token):\r\n continue\r\n sub_tokens = Validator.camel_case_split(raw_token)\r\n ret = True\r\n for sub_token in sub_tokens:\r\n ret = ret and self.corpus.validate_token(sub_token)\r\n\r\n if not ret:\r\n self.errors.append({\r\n \"isKey\": value_is_key,\r\n \"path\": path,\r\n \"typo\": raw_token,\r\n })", "def is_pangram(sentence):\n\n list = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p',\\\n 'q','r','s','t','u','v','x','z']\n count = 0\n sentence = 
sentence.lower()\n\n for i in range(0, len(list)):\n for j in sentence:\n if list[i] == j:\n count = count + 1\n list[i] = '>'\n\n if count == 24:\n return True\n\n elif count < 24 or count > 24:\n return False", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def is_valid_msg(msg):\n for char in msg:\n if char not in string.ascii_letters and char not in string.punctuation and char != ' ':\n return False\n return True", "def isValid(text):\n\n\n return any(word in text for word in [u\"我好看么\", u\"称赞\"])", "def validate(self, text):\n raise NotImplementedError()", "def test_text_is_correct(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n 'lang': 'en'\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']", "def test_valid_str(self):\n try:\n lowercase_validator('hg213i75%^&$efg')\n except ValidationError:\n self.fail('String raised ValidationError unexpectedly')", "def expected_rubbish(self):", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)", "def is_secret_string(value):\n if not isinstance(value, basestring):\n return False\n return bool(_secret_string_pattern.match(value))" ]
[ "0.61329734", "0.60513806", "0.5977086", "0.5897228", "0.5871303", "0.5826443", "0.5744146", "0.5684479", "0.567305", "0.56470114", "0.56131446", "0.5608203", "0.5595567", "0.5540064", "0.55184543", "0.5503536", "0.54949117", "0.5491614", "0.547028", "0.54653454", "0.54595995", "0.5458726", "0.5438376", "0.5433947", "0.5419377", "0.53999", "0.5396586", "0.5384901", "0.53801244", "0.53788555" ]
0.6516636
0
Pulse Compression is used to increase the range resolution and SNR by performing matched filtering of the transmitted pulse (template) with the received signal (x)
def pulse_compression(x, template, normalize=False, window=None, nfft=None): [num_pulses, samples_per_pulse] = x.shape if nfft is None: nfft = samples_per_pulse if window is not None: Nx = len(template) if callable(window): W = window(cp.fft.fftfreq(Nx)) elif isinstance(window, cp.ndarray): if window.shape != (Nx,): raise ValueError("window must have the same length as data") W = window else: W = get_window(window, Nx, False) template = cp.multiply(template, W) if normalize is True: template = cp.divide(template, cp.linalg.norm(template)) fft_x = cp.fft.fft(x, nfft) fft_template = cp.conj(cp.tile(cp.fft.fft(template, nfft), (num_pulses, 1))) compressedIQ = cp.fft.ifft(cp.multiply(fft_x, fft_template), nfft) return compressedIQ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gate(filename,threshold,ratio,attack,release,wout=True,plot=True):\n start=time.time()\n if ratio < 1.0:\n print('Ratio must be > 1.0 for compression to occur! You are expanding.')\n if ratio==1.0:\n print('Signal is unaffected.')\n n, data, data_dB,sr,ch=inputwav(filename)\n #Array for the compressed data in dB\n dataC=data_dB.copy()\n #attack and release time constant\n a=np.exp(-np.log10(9)/(44100*attack*1.0E-3))\n re=np.exp(-np.log10(9)/(44100*release*1.0E-3))\n #apply compression\n print('Compressing...')\n for k in range (ch):\n for i in range (n):\n if dataC[i,k]<threshold:\n dataC[i,k]=-100\n #Array for the smooth compressed data with makeup gain applied\n #Convert our dB data back to bits\n dataCs_bit=10.0**((dataC)/20.0)\n #sign the bits appropriately:\n for k in range (ch):\n for i in range (len(data)):\n if data[i,k]<0.0:\n dataCs_bit[i,k]=-1.0*dataCs_bit[i,k]\n #Plot the data:\n if plot==True:\n print('Plotting...')\n t=np.linspace(0,n/(1.0*sr),n)\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2) \n #ax2.plot(t,gain,'k-',linewidth=0.1,label='Gain Reduction')\n #ax2.plot(t,sgain,'r-',linewidth=1, label='Gain Reduction Smooth')\n ax1.plot(t,data,'k-',linewidth=1,label=filename)\n ax1.plot(t,dataCs_bit,'m-',linewidth=0.1,\n label=filename+' compressed')\n ax1.axhline(10**(threshold/20.0),linestyle='-',\n color='cyan',linewidth=1)\n ax1.axhline(-10**(threshold/20.0),linestyle='-',\n color='cyan',linewidth=1)\n ax1.legend()\n ax2.legend()\n ax2.set_xlabel('Time (s)')\n ax2.set_ylabel('Gain Reduction (dB)')\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')\n #write data to 16 bit file\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_gated.wav',dataCs_bit,\n sr,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataC,dataCs_bit", "def compress(filename,threshold,ratio,makeup,attack,release,wout=True,plot=False):\n start=time.time()\n if ratio < 1.0:\n print('Ratio must be > 1.0 for compression to occur! 
You are expanding.')\n if ratio==1.0:\n print('Signal is unaffected.')\n n, data, data_dB,sr,ch=inputwav(filename)\n #Array for the compressed data in dB\n dataC=data_dB.copy()\n #attack and release time constant\n a=np.exp(-np.log10(9)/(44100*attack*1.0E-3))\n re=np.exp(-np.log10(9)/(44100*release*1.0E-3))\n #apply compression\n print('Compressing...')\n for k in range(ch):\n for i in range (n):\n if dataC[i,k]>threshold:\n dataC[i,k]=threshold+(dataC[i,k]-threshold)/(ratio)\n #gain and smooth gain initialization\n gain=np.zeros(n)\n sgain=np.zeros(n)\n #calculate gain\n gain=np.subtract(dataC,data_dB)\n sgain=gain.copy()\n #smoothen gain\n print('Smoothing...')\n for k in range(ch):\n for i in range (1,n):\n if sgain[i-1,k]>=sgain[i,k]:\n sgain[i,k]=a*sgain[i-1,k]+(1-a)*sgain[i,k]\n if sgain[i-1,k]<sgain[i,k]:\n sgain[i,k]=re*sgain[i-1,k]+(1-re)*sgain[i,k] \n #Array for the smooth compressed data with makeup gain applied\n dataCs=np.zeros(n)\n dataCs=data_dB+sgain+makeup\n #Convert our dB data back to bits\n dataCs_bit=10.0**((dataCs)/20.0)\n #sign the bits appropriately:\n for k in range (ch):\n for i in range (n):\n if data[i,k]<0.0:\n dataCs_bit[i,k]=-1.0*dataCs_bit[i,k]\n #Plot the data:\n if plot==True:\n print('Plotting...')\n t=np.linspace(0,n/(1.0*sr),n)\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2) \n ax2.plot(t,gain,'k-',linewidth=0.1,label='Gain Reduction')\n ax2.plot(t,sgain,'r-',linewidth=1, label='Gain Reduction Smooth')\n ax1.plot(t,data,'k-',linewidth=1,label=filename)\n ax1.plot(t,dataCs_bit,'m-',linewidth=0.1,\n label=filename+' compressed')\n ax1.axhline(10**(threshold/20.0),linestyle='-',\n color='cyan',linewidth=1)\n ax1.axhline(-10**(threshold/20.0),linestyle='-',\n color='cyan',linewidth=1)\n ax1.legend()\n ax2.legend()\n ax2.set_xlabel('Time (s)')\n ax2.set_ylabel('Gain Reduction (dB)')\n ax1.set_ylabel('Amplitude (Rel. 
Bit)')\n ax1.set_xlabel('Time (s)')\n #write data to 16 bit file\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_compressed.wav',dataCs_bit,\n sr,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataCs,dataCs_bit", "def ts_method(signal, peaks, template_duration: float = 0.12, fs: int = processing.FS, window: int = 10, **kwargs):\n\n t_dur = round(template_duration * fs)\n if not t_dur % 2 == 0:\n t_dur += 1\n dims = signal.shape\n # if np.max(np.abs(signal[0, :])) < np.max(np.abs(signal[1, :])):\n # r_peaks = find_qrs(signal[1, :], peak_search=peak_search)\n # r_peaks = peak_enhance(signal[1, :], peaks=r_peaks, window=0.2)\n # else:\n # processing.scatter_beautiful(r_peaks * 1000 / fs, title='peaks')\n extracted_signal = np.copy(signal)\n # print(len(r_peaks))\n # Please, rework it...\n for n in range(dims[0]):\n for i in range(0, len(peaks), window):\n\n if i + window > len(peaks):\n r_peaks = peaks[i:]\n else:\n r_peaks = peaks[i:i + window]\n\n template = np.full((len(r_peaks), t_dur), np.nan)\n for num, r_ind in enumerate(r_peaks):\n if r_ind < t_dur // 2:\n template[num, t_dur // 2 - r_ind - 1:] = extracted_signal[n, 0:r_ind + t_dur // 2 + 1]\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n template[num, 0:dims[1] - r_ind + t_dur // 2] = extracted_signal[n, r_ind - t_dur // 2:]\n else:\n template[num] = extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2]\n template_mean = np.nanmean(template, axis=0) # None for edge cases\n for r_ind in r_peaks:\n if r_ind < t_dur // 2:\n extracted_signal[n, 0:r_ind + t_dur // 2 + 1] -= template_mean[t_dur // 2 - r_ind - 1:]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel start ' + str(n))\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2 + 1] -= template_mean[\n 0:dims[1] - r_ind + t_dur // 2]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel end ' + str(n))\n else:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2] -= template_mean\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel ' + str(n))\n return extracted_signal", "def transform(self, x):\n\n sr = self.audio_settings[\"sr\"]\n\n ###################\n # Waveform \n ###################\n\n if self.cache < 2:\n if self.aug_settings is not None:\n if \"bg_noise\" in self.aug_settings:\n x = self.bg_adder(samples=x, sample_rate=sr)\n\n if \"time_shift\" in self.aug_settings:\n x = time_shift(x, sr, **self.aug_settings[\"time_shift\"])\n\n if \"resample\" in self.aug_settings:\n x, _ = resample(x, sr, **self.aug_settings[\"resample\"])\n \n x = librosa.util.fix_length(x, sr)\n\n ###################\n # Spectrogram\n ###################\n \n x = librosa.feature.melspectrogram(y=x, **self.audio_settings) \n x = librosa.feature.mfcc(S=librosa.power_to_db(x), n_mfcc=self.audio_settings[\"n_mels\"])\n\n\n if self.aug_settings is not None:\n if \"spec_aug\" in self.aug_settings:\n x = spec_augment(x, **self.aug_settings[\"spec_aug\"])\n\n x = torch.from_numpy(x).float().unsqueeze(0)\n return x", "def preparePulseSequence(self):\n # get carrier frequency\n if self._MWSource is not None:\n carrierFrequency = self.carrierFrequency()\n else:\n carrierFrequency = 0\n\n # Decide to apply or not corrections\n applyCorrectionsArray = [\n pulse.applyCorrections for pulse in self.pulseList]\n 
applyCorrection = True in applyCorrectionsArray\n\n # Define self.pulseSequenceArray\n self.pulseSequenceArray = zeros(\n self.numberOfPoints(), dtype=numpy.complex128)\n self._offsets = [None, None]\n\n for pulse in self.pulseList:\n if not(pulse.pulseOn):\n continue\n correctedPulseArray = zeros(\n self.numberOfPoints(), dtype=numpy.complex128)\n\n if self._params[\"modulationMode\"] == \"IQMixer\":\n if pulse.frequency is None:\n pulse.frequency = carrierFrequency\n IF = pulse.frequency - carrierFrequency\n # if applyCorrection:\n # calibrationParameters=self._mixer.calibrationParameters(f_sb=pulse.frequency-carrierFrequency, f_c=carrierFrequency)\n # self.debugPrint(calibrationParameters)\n # sidebandPulse=self._mixer.generateSidebandWaveform(f_sb = f_sb,c = calibrationParameters['c'],phi = calibrationParameters['phi'],length=self.numberOfPoints())\n # self._offsets=[calibrationParameters[\"i0\"],calibrationParameters[\"q0\"]]\n # else:\n # sidebandPulse = exp(-1.j*(2.0*math.pi*f_sb*arange(0,self.numberOfPoints())))\n sidebandPulse = self._mixer.generateSidebandWaveform(\n IF=IF, useCalibIfNone=applyCorrection, length=self.numberOfPoints())\n correctedPulseArray[:] = pulse._pulseArray * \\\n sidebandPulse * exp(1.j * pulse.phase)\n\n elif self._params[\"modulationMode\"] == \"SimpleMixer\":\n if applyCorrection:\n self._offsets = [self._mixer.calibrationParameters()]\n else:\n self._offsets[0] = 0\n correctedPulseArray[:] = pulse._pulseArray\n\n elif self._params[\"modulationMode\"] == \"InternalModulation\":\n print \"not configured yet\"\n\n elif self._params[\"modulationMode\"] is None:\n if applyCorrection:\n if hasattr(self, \"pulseCorrectionFunction\"):\n correctedPulseArray[:] = self.pulseCorrectionFunction(\n pulse._pulseArray)\n else:\n print self.name(), \": no correction function found for DC pulses\"\n correctedPulseArray[:] = pulse._pulseArray\n else:\n correctedPulseArray[:] = pulse._pulseArray\n else:\n print \"bad modulationMode\"\n\n self.pulseSequenceArray[:] += correctedPulseArray[:]", "def inst_bp(instrument,array=\"2\"):\n\n if instrument == \"MUSTANG2\" or instrument == \"MUSTANG\":\n srms = (300*u.um).to(\"m\") # surface RMS (microns)\n ### Reference: https://science.nrao.edu/facilities/gbt/proposing/GBTpg.pdf\n EA90 = 0.36 # Aperture efficiency at 90 GHz\n ### The beam efficiencies should be taken as 1.37* Aperture Efficiency\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n if instrument == \"MUSTANG2\":\n flow = 75.0 # GHz\n fhig = 105.0 # GHz\n else:\n flow = 82.5 # GHz\n fhig = 97.5 # GHz\n \n farr = np.arange(flow,fhig,1.0) # frequency array.\n tran = farr*0.0 + 1.0 # Let the transmission be unity everywhere.\n Larr = const.c.value/(farr*1.0e9) # Keep calm and carry on.\n ### Old formula:\n #Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n ### Correct formula: (10 April 2018)\n Ruze = Gnot * np.exp(-(4.0*np.pi*srms.value/Larr)**2)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n \n if instrument == \"NIKA2\" or instrument == \"NIKA\":\n caldir='/home/romero/NIKA2/NIKA_SVN/Processing/Pipeline/Calibration/BP/'\n bpfile=caldir+'Transmission_2017_Jan_NIKA2_v1.fits'\n hdulist = fits.open(bpfile)\n\n if array == \"1H\": # 1mm (260 GHz) array, Horizontal Polarization\n tbdata = hdulist[1].data # 1H\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1h = 
np.sum(freq*tran)/np.sum(tran)\n \n if array == \"1V\": # 1mm (260 GHz) array, Vertical Polarization\n tbdata = hdulist[2].data # 1V\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1v = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"2\": # 2mm (150 GHz) array\n tbdata = hdulist[3].data # 2\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq2 = np.sum(freq*tran)/np.sum(tran)\n\n ### Trim the zero-frequency listing, if any.\n gi=np.where(freq > 0)\n freq = freq[gi]\n tran = tran[gi]\n erro = erro[gi]\n atmt = atmt[gi]\n \n### Calculate Aperture efficiencies from information found at:\n### http://www.iram.es/IRAMES/mainwiki/Iram30mEfficiencies\n Beff = 0.630 # at 210 GHz\n Aeff = Beff/1.27 # See text on webpage\n srms = (66.0*u.um).to(\"m\") # surface RMS (microns)\n R210 = np.exp(-4.0*np.pi*(srms/(const.c/(2.1e11*u.s**-1))).value) #\n Gnot = Aeff/R210 # Unphysical, but see documentation...\n\n Larr = const.c.value/(freq*1.0e9) # Keep calm and carry on. \n Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n farr = freq\n \n#########################################################################\n\n if instrument == 'ACT90':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 65.0 # GHz\n fhig = 125.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n if instrument == 'ACT150':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 120.0 # GHz\n fhig = 180.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n\n return band, farr", "def resample(s, p, q, h=None):\n gcd = fractions.gcd(p,q)\n if gcd>1:\n p=p/gcd\n q=q/gcd\n \n if h is None: #design filter\n #properties of the antialiasing filter\n log10_rejection = -3.0\n stopband_cutoff_f = 1.0/(2.0 * max(p,q))\n roll_off_width = stopband_cutoff_f / 10.0\n \n #determine filter length\n #use empirical formula from [2] Chap 7, Eq. (7.63) p 476\n rejection_db = -20.0*log10_rejection;\n l = ceil((rejection_db-8.0) / (28.714 * roll_off_width))\n \n #ideal sinc filter\n t = arange(-l, l + 1)\n ideal_filter=2*p*stopband_cutoff_f*sinc(2*stopband_cutoff_f*t) \n \n #determine parameter of Kaiser window\n #use empirical formula from [2] Chap 7, Eq. 
(7.62) p 474\n beta = kaiser_beta(rejection_db)\n \n #apodize ideal filter response\n h = kaiser(2*l+1, beta)*ideal_filter\n\n ls = len(s)\n lh = len(h)\n\n l = (lh - 1)/2.0\n ly = ceil(ls*p/float(q))\n\n #pre and postpad filter response\n nz_pre = floor(q - mod(l,q))\n nz_pre = int(nz_pre)\n hpad = h[-lh+nz_pre:]\n\n offset = floor((l+nz_pre)/q)\n nz_post = 0;\n while ceil(((ls-1)*p + nz_pre + lh + nz_post )/q ) - offset < ly:\n nz_post += 1\n hpad = hpad[:lh + nz_pre + nz_post]\n\n #filtering\n xfilt = upfirdn(s, hpad, p, q)\n\n return xfilt[int(offset)-1:int(offset)-1+int(ly)]", "def __init__(self, power, T0_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm) \n self.set_time_window_ps(time_window_ps)\n \n ### Generate pulse\n if not power_is_avg:\n # from https://www.rp-photonics.com/sech2_shaped_pulses.html\n self.set_AT( np.sqrt(power)/np.cosh(self.T_ps/T0_ps) )\n else:\n self.set_AT( 1 / np.cosh(self.T_ps/T0_ps) )\n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n \n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def __init__(self, power, T0_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm)\n self.set_time_window_ps(time_window_ps) \n \n GDD = GDD\n TOD = TOD\n \n # from https://www.rp-photonics.com/gaussian_pulses.html\n self.set_AT( np.sqrt(power) * np.exp(-2.77*0.5*self.T_ps**2/(T0_ps**2)) ) # input field (W^0.5) \n if power_is_avg: \n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def add_signal(signal_array, json_file, indent_level, scale):\n\n logger.debug('+ Raw signal:{0}'.format(signal_array))\n\n initial_val = signal_array[1]\n # If no intial condition is defined give it an X, saves headache later. \n # issue a warning.\n if ( not(re.search('^[01xX]', signal_array[1])) ):\n signal_array[1] = str(scale) +'X'\n logger.warning(\n '+ Initial condition not defined for {0}. Force invalid \\'x\\''\n .format(signal_array[0])) \n for i,time_step in enumerate(signal_array[1:]):\n\n logger.debug('|---:{0} {1}'.format(i, time_step))\n\n if (re.search('X|x',time_step)):\n signal_array[i+1] = str(scale) + 'X'\n # FIXME: New not in documentation.\n # This is added to represent glitchiness or uncertanity.\n elif (re.search('G',time_step)):\n signal_array[i+1] = str(scale*.03) + 'T' + str(scale*.97) + 'T'\n # FIXME: New not in documentation\n # this is a simple encoding. 
0.x will indicate an undef to 1 transition\n # which is not full cycle, and -0.x will show a undef to 0 transition\n # can potenitally be expanded to use x to decide proportion.\n # The combo indication is fixed to 0.25\n elif (re.search(r'0.\\d',time_step)):\n if (re.search(r'-0.\\d',time_step)):\n signal_array[i+1] = str(0.25*scale) + 'U' + str(0.75*scale) + 'L'\n else:\n signal_array[i+1] = str(0.25*scale) + 'U' + str(0.75*scale) + 'H'\n elif (re.search('0',time_step)):\n signal_array[i+1] = str(scale) + 'L'\n elif (re.search('1',time_step)):\n signal_array[i+1] = str(scale)+'H'\n elif (re.search('\\|', time_step)):\n signal_array[i+1] = 'S'\n temp = re.sub(r'\\d+([UDXLHC]).*',r'\\1',signal_array[i])\n signal_array[i+1] = ';[dotted]2' + temp + ';'\n else:\n # allow us to deal with a value change format by searching\n # backwards to find the last change from the current time step. The\n # search is to be performed on the waveform rendered so far.\n signal_array[i+1] = restore_after_spacer(signal_array[i],signal_array[i-1]) \n\n return signal_array", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def bake_signal(signal, sr, duration):\n if signal[\"type\"] == \"sine\":\n carrier_domain = numpy.linspace(0, duration, int(sr*duration), endpoint=False) * numpy.pi * 2\n carrier_freq = signal[\"frequency\"]\n carrier_mult = bake_multiplier(signal[\"multiplier\"], sr, duration)\n carrier_modf = bake_modifier(signal[\"modifier\"], sr, duration)\n baked_signal = numpy.sin(carrier_domain * carrier_freq + carrier_modf) * carrier_mult\n else:\n print(\"error\")\n exit(-1)\n return baked_signal", "def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = 
tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n # make sure we normally go outside the range\n self.assertGreater(np.sum(M1.out < mrate), 0)\n self.assertGreater(np.sum(M1.out > Mrate), 0)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertEqual(np.sum(M2.out < mrate), 0)\n self.assertEqual(np.sum(M2.out > Mrate), 0)", "def adc(self, signal):", "def compress(self, samples):\n rms = np.sqrt(np.dot(samples, samples) / window)\n power = self.power * (1.0 - self.smooth) + rms * self.smooth\n self.power = power\n if power <= 1e-40:\n samples *= 0\n return\n db_in = 10.0 * math.log10(power)\n if db_in <= self.limit:\n samples *= 0\n return\n db_out = self.cf(db_in)\n db_gain = db_out - db_in + self.postgain\n gain = 10**(0.1 * db_gain)\n samples *= gain", "def __init__(self, time_window_ps, center_wavelength_nm, power,frep_MHz = 100., NPTS = 2**10,\n power_is_avg = False,\n fileloc = '',\n flip_phase = True):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n try:\n self.fileloc = fileloc\n # make sure we weren't passed mks units\n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 )\n self.set_time_window_ps(time_window_ps)\n self.set_center_wavelength_nm(center_wavelength_nm) # reference wavelength (nm) \n \n # power -> EPP\n if power_is_avg:\n power = power / self.frep_mks\n \n # Read in retrieved FROG trace\n frog_data = np.genfromtxt(self.fileloc)\n wavelengths = frog_data[:,0]# (nm)\n intensity = frog_data[:,1]# (arb. units)\n phase = frog_data[:,2]# (radians)\n\n if flip_phase:\n phase = -1 * phase\n \n pulse_envelope = interp1d(wavelengths, intensity, kind='linear',\n bounds_error=False,fill_value=0)\n phase_envelope = interp1d(wavelengths, phase, kind='linear', \n bounds_error=False,fill_value=0)\n \n gridded_intensity = pulse_envelope(self.wl_nm)\n gridded_phase = phase_envelope(self.wl_nm)\n\n # Calculate time domain complex electric field A\n self.set_AW(gridded_intensity*np.exp(1j*gridded_phase))\n # Calculate normalization factor to achieve requested \n # pulse energy\n e_scale = np.sqrt(power / self.calc_epp() )\n self.set_AT(self.AT * e_scale )\n\n except IOError:\n print ('File not found.' 
)", "def main():\r\n\r\n ### Choose and Import File\r\n\r\n inSound = Sound()\r\n\r\n rate = inSound.rate\r\n data = inSound.data\r\n dataLength = len(data)\r\n \r\n info = inSound.get_info()\r\n head, filename = os.path.split(info[0]) # get filename of input\r\n \r\n # Decide output directory and filename\r\n outDir = r'out'\r\n outFile = os.path.join(outDir, 'out_'+filename)\r\n\r\n # Check if data has multiple channels, if yes use only one\r\n if(len(data.shape) > 1):\r\n data = data[:,0]\r\n\r\n\r\n ### Set All Parameters\r\n\r\n #get parameters from user dialogue\r\n params = getParameters()\r\n\r\n numChannels = params['numChannels'][0] # number of Channels\r\n loFreq = params['loFreq'][0] # lower bound on frequencies\r\n hiFreq = params['hiFreq'][0] # upper bound on frequencies\r\n plotChannels = params['plotChannels'][0] # if it should plot the Gammatone channels\r\n block_time = params['block_time'][0] # in ms\r\n block_shift = params['block_shift'][0] # in ms\r\n selectChannels = params['selectChannels'][0] # number of channels to activate at a single time\r\n\r\n\r\n ### Filter input file\r\n\r\n filtered, channel_fs = filterDataGamaTone(data, rate, numChannels, loFreq, hiFreq, plotChannels)\r\n\r\n\r\n ### Gammatones -> Stimulation Amplitude for time block\r\n\r\n samples_in_block = np.floor(block_time * rate / 1000).astype('int')\r\n samples_in_shift = np.floor(block_shift * rate / 1000).astype('int')\r\n\r\n summed = gammatoneToAmplitude(filtered, samples_in_block, samples_in_shift)\r\n\r\n # only activate the n electrodes that have the largest stimulation\r\n amps = n_largest_channels(summed, n=selectChannels)\r\n\r\n \r\n #### Sound reconstruction\r\n\r\n # for each timeblock we need to duplicate enough samples to fill it at sample rate\r\n amps_samples = np.repeat(amps, samples_in_shift, axis=1)\r\n #trim end to get same length as input\r\n amps_samples = amps_samples[:,:dataLength] \r\n\r\n # from amplitude samples and frequencies, reconstruct sound\r\n res_data = generateSound(amps_samples, channel_fs, rate)\r\n\r\n\r\n ### Write to output file\r\n write(outFile, rate, res_data)\r\n print('Wrote file to: \\n' + outFile)", "def __init__(self, power, FWHM_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm) \n self.set_time_window_ps(time_window_ps)\n\n T0_ps = FWHM_ps/3.7909885\n ### Generate pulse\n if not power_is_avg:\n # numpy.sinc is sin(pi*x)/(pi*x), so we divide by pi\n self.set_AT( np.sqrt(power) * np.sinc(self.T_ps/(T0_ps*np.pi)) ) \n else:\n self.set_AT( 1 / np.sinc(np.pi * self.T_ps/(T0_ps*np.pi)) )\n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n \n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def fillSignalTemplates(opt):\n\n totalSig={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n data.AddFile(os.path.join(opt.input,opt.sig))\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel\n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop 
over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n print '\\t',catName,categCut\n\n #signal modelling histograms\n histos=[]\n for name,pfix in [('sig_'+catName,''),('sig_%s_sigShape'%catName,'mix')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n wgtExpr='wgt*%f'%(SIGNALXSECS[opt.xangle]*opt.lumi)\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),\n '{0}*({1})'.format(wgtExpr,templCuts),\n 'goff')\n h=data.GetHistogram()\n histos.append( h.Clone(name) ) \n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalSig[icat]=h.Integral()\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n \n print '\\t total signal:',totalSig\n return totalSig,templates", "def process_raw_data_eng(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack,\n shift, maxSpeed='45', minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import sys\n from math import floor\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the (.txt) data with specific headers --> need to change this\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T0 (degC),T5 (degC), Laser PID Readout,Det PID Readout,win0Fit0,win0Fit1,win0Fit3,win1Fit4,win0Fit5,win0Fit6,win0Fit7,win0Fit8,win0Fit9,win1Fit0,win1Fit1,win1Fit2,win1Fit3,win1Fit4,win1Fit5,win1Fit6,Det Bkgd,Ramp Ampl,CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Battery T (degC),FET T (degC),GPS Time,Latitude,Longitude\"\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T0 (degC),T5 (degC),Laser PID Readout,Det PID Readout,win0Fit0,win0Fit1,win0Fit2,win0Fit3,win0Fit4,win0Fit5,win0Fit6,win0Fit7,win0Fit8,win0Fit9,win1Fit0,win1Fit1,win1Fit2,win1Fit3,win1Fit4,win1Fit5,win1Fit6,Det Bkgd,Ramp Ampl,CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Battery T (degC),FET T (degC),GPS Time,Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n\n headerNames = sHeader.split(',')\n GPS_loc = 37 # Where the GPS data is located (in the row)\n\n infoHeader = \"FILENAME\\n\"\n\n # gZIP is indicating if it is a ZIP file (I don't think I've written this in)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + 
xFilename,'r')\n else:\n #f = open(xDir + \"/\" + xFilename, 'r')\n f = open(xDir + xFilename, 'r')\n\n ### FIGURING OUT DATE FROM FILENAME (WILL NEED TO CHANGE THIS IF DIFFERENT FILENAME)\n xdat = str('20') + xFilename[11:17]\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n\n # FINDING THE FIRST TIME NOTED\n firsttime = int(float(open(xDir + xFilename).readlines().pop(1).split(',')[37][:-4]))\n\n ## MAKING TEMPORARY FILE (FOR IF LATER YOU HAVE TO ADD A DATE)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # 3fOut = open(fnOutTemp, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # READ IN THE LINES\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n\n if bGood:\n lstS = row.split(',')\n gpstime = lstS[GPS_loc]\n dtime = lstS[0]\n dt = lstS[1]\n time_dt = lstS[2]\n epoch = lstS[3]\n # nano = lstS[4]\n\n gps_time = lstS[37]\n dateob = datetime.fromtimestamp(int(gps_time[:-4]))\n nano = gps_time[-4:]\n\n # dateob = datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]),int(time_dt[0:2]),int(time_dt[3:5]),int(time_dt[6:8]),int(float(nano)*1e-9))\n\n dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n # Date = dateob.strftime('%m%/%d/%Y')\n Date = dateob.strftime('%Y-%m-%d')\n\n GPS_Time = dateob.strftime('%H%:%M:%S')\n seconds = floor(float(gpstime))\n nano = dateob.strftime('%f')\n\n # dateob = datetime(int(dtime[6:10]),int(dtime[0:2]),int(dtime[3:5]),int(dtime[11:13]),int(dtime[14:16]),int(dtime[17:19]),int(float(dtime[19:23])*1000000))\n # epoch = dateob.strftime('%s.%f')\n\n # THIS IS USING THE CSU METHOD. 
IN OUR METHOD, WE DO THE SPEED LATER IN THE ALGORITHM.\n\n # # if RSSI of bottome sensor is below 50 if float(lstS[28]) < xMinRSSI: fLog.write(\"RSSI (Bottom)\n # value less than 50: \"+ str(lstS[28]) + \"\\n\") continue # Car Speed if float(lstS[12]) >\n # xMaxCarSpeed: fLog.write(\"Car speed of \" + str(float(lstS[12])) + \" exceeds max threshold of: \" +\n # str(xMaxCarSpeed) + \"\\n\") continue if float(lstS[12]) < xMinCarSpeed: fLog.write(\"Car speed of \" +\n # str(float(lstS[12])) + \" less than min threshold of: \" + str(xMinCarSpeed) + \"\\n\") continue\n\n # For some reason it is producing its longitude in positive number while USA is located at negative longitude\n # thats why we do -1 * float(lstS[7])\n\n # fix this when we have stuffs\n\n # s1 = str(lstS[1])+\",\"+str(lstS[2])+\",\"+str(lstS[3])+\",\"+str(lstS[4])+\",\"+str(lstS[6])+\",\"\n # s1 += str(-1 * float(lstS[7]))+\",\"+str(lstS[12])+\",\"+str(lstS[14])+\",\"+str(lstS[15])+\",\"+str(lstS[16])+\",\"+str(lstS[25])+\",\"\n # s1 += str(lstS[28])+\",\"+str(lstS[38])+\",\"+str(lstS[41])+\"\\n\"\n\n ## choosing what to write in the .csv\n\n # if sys.platform.startswith('win'):\n # ## DATE, TIME, SECONDS,NANOSECONDS\n # csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n # float(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n # pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n # ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n # csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n # lstS[26]) + ',' + str('0') + ',' + str(lstS[26]) + ','\n # ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n # csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n # lstS[26]) + ',' + str(lstS[27]) + ',' + str(lstS[28]) + ','\n # # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n # csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(\n # lstS[32]) + ',' + str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(\n # lstS[39])\n\n # =============================================================================\n # if not sys.platform.startswith('win'):\n # ## DATE, TIME, SECONDS,NANOSECONDS\n # csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str((int(floor(pd.to_numeric(dateob.strftime('%s.%f')))))) + ',' + str((pd.to_numeric(dateob.strftime('%f')) *1000)) + str(',')\n # ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n # csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(lstS[26]) + ',' + str('0') + ','+ str(lstS[26]) + ','\n # ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n # csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(lstS[26]) + ',' + str(lstS[27]) +',' + str(lstS[28]) + ','\n # # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n # csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(lstS[32]) + ','+ str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(lstS[39][:-1]) + str('\\n')\n # #fOut.write('\\n')\n # fOut.write(csvWrite)\n # #fOut.write('\\n')\n #\n # =============================================================================\n # if not sys.platform.startswith('win'):\n if 1 == 1:\n ## DATE, TIME, SECONDS,NANOSECONDS\n csvWrite = str(Date) + ',' + str(GPS_Time) + ',' + str(seconds) + ',' + str(nano) + str(',')\n ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n csvWrite += str('50') + ',' + str('0') + 
',' + str('0') + ',' + str('0') + ',' + str(\n lstS[26]) + ',' + str('0') + ',' + str(lstS[26]) + ','\n ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[26]) + ',' + str(lstS[27]) + ',' + str(lstS[28]) + ','\n # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(\n lstS[32]) + ',' + str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(\n lstS[39])\n # fOut.write('\\n')\n\n #### REMOVING THE FIRST BIT OF DATA (if you need to )\n if seconds >= (firsttime + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n\n del (csvWrite)\n # xCntGoodValues += 1\n\n xCntObs += 1\n\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n\n infOut.write(str(xFilename) + '\\n')\n\n fOut.close()\n fLog.close()\n infOut.close()\n\n # xDate = dateob.strftime(\"%Y%m%d\")\n\n # newfnOut = xOutDir + xCar + \"_\" + xDate + \"_dat.csv\" #set CSV output for raw data\n # newfnLog = xOutDir + xCar + \"_\" + xDate + \"_log.csv\"\n\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n\n import numpy as np\n radians = False\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'], axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row: calc_velocity(row['timediff'], row['distance']), axis=1)\n wind_df['U_cor'] = wind_df.apply(lambda row: row['U'] + row['VELOCITY'], axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df3 = 
wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'distance', 'timediff', 'uncor_theta', 'CH4'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1).loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4']]\n wind_df4 = add_odometer(wind_df3.loc[wind_df3.totalWind.notnull(), :], 'LAT', 'LONG')\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df4 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :].copy().drop_duplicates()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def get_scale_data(pin, timeout=1.0):\n timestamp = time.monotonic()\n with pulseio.PulseIn(pin, maxlen=96, idle_state=True) as pulses:\n pulses.pause()\n pulses.clear()\n pulses.resume()\n\n while len(pulses) < 35:\n if (time.monotonic() - timestamp) > timeout:\n raise RuntimeError(\"Timed out waiting for data\")\n pulses.pause()\n bits = [0] * 96 # there are 12 bytes = 96 bits of data\n bit_idx = 0 # we will count a bit at a time\n bit_val = False # first pulses will be LOW\n print(pulses[1])\n for i in range(len(pulses)):\n if pulses[i] == 65535: # This is the pulse between transmits\n break\n num_bits = int(pulses[i] / 75 + 0.5) # ~14KHz == ~7.5us per clock\n #print(\"%d (%d),\" % (pulses[i], num_bits), end='')\n for bit in range(num_bits):\n #print(\"bit #\", bit_idx)\n bits[bit_idx] = bit_val\n bit_idx += 1\n if bit_idx == 96: # we have read all the data we wanted\n #print(\"DONE\")\n break\n bit_val = not bit_val\n #print(bits)\n data_bytes = [0] * 12\n for byte_n in range(12):\n thebyte = 0\n for bit_n in range(8):\n thebyte <<= 1\n thebyte |= bits[byte_n*8 + bit_n]\n data_bytes[byte_n] = thebyte\n print([hex(i) for i in data_bytes])\n # do some very basic data checking\n if data_bytes[0] != 3 or data_bytes[1] != 3 or data_bytes[7] != 4 \\\n or data_bytes[8] != 0x1C or data_bytes[9] != 0 or data_bytes[10] \\\n or data_bytes[11] != 0:\n raise RuntimeError(\"Bad data capture\")\n reading = ScaleReading()\n\n reading.stable = data_bytes[2] & 0x4\n reading.units = data_bytes[3]\n reading.weight = data_bytes[5] + (data_bytes[6] << 8)\n if data_bytes[2] & 0x1:\n reading.weight *= -1\n if reading.units == ScaleReading.OUNCES:\n # oi no easy way to cast to int8_t\n if data_bytes[4] & 0x80:\n data_bytes[4] -= 0x100\n reading.weight *= 10 ** data_bytes[4]\n return reading", "def signal_process( Qin, source, stop_flag, log, pos_ref, FIX_1BIT_ERRORS=False ):\n\n\trow_size = 16 + 112*2 #240 bits\n\t\n\twhile( not stop_flag.is_set() ):\n\t\tcurr_time = time.strftime('%d/%b/%Y %H:%M:%S', time.localtime())\n\t\t\n\t\t# Get streaming chunk from sdr_read thread\n\t\ty = Qin.get();\n\t\t\t\n\t\tpacket_diff = len(packets)\n\t\tidx_preamble, noise_floor = asp.detectPreamble(y)\t\t\n\t\tfor n in idx_preamble:\n\t\t\tsignal = abs(y[int(n) : int(n) + row_size])\n\t\t\tmsg = asp.decode_ADSB( signal, FIX_1BIT_ERRORS )\n\t\t\tif msg != None:\n\t\t\t\tsnr = asp.SNR(signal, noise_floor)\n\t\t\t\tpkt = ao.Packet(msg, 
time.time(), snr)\n\t\t\t\tpackets.append( pkt )\n\t\t\t\tprint( '!' , end='', flush=True )\n\t\t\t\t\n\t\t\t\tif len(packets) > PACKET_BUFF_SIZE:\n\t\t\t\t\tpackets.pop(0)\n\t\t\t\t\n\t\t\t\tif log != None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\twith open(log, 'a') as f:\n\t\t\t\t\t\t\tf.write(f\"[{curr_time}] {msg}\\n\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(f\"Error writing to {log}!\")\n\t\t\t\t\n\t\t\t\tif pkt.icao in planes:\n\t\t\t\t\tplanes[pkt.icao].process_packet( pkt )\n\t\t\t\telif pkt.icao != None:\n\t\t\t\t\tplanes[pkt.icao] = ao.Plane( pkt, pos_ref )\n\t\t\n\t\tif packet_diff == len(packets):\n\t\t\t# packets.append(f\"[{curr_time}] None received...\")\n\t\t\tprint( '.' , end='', flush=True )\n\t\t\n\t\t# Remove excess objects\n\t\tto_delete = []\n\t\tfor p in planes.keys():\n\t\t\tif (time.time() - planes[p].last_update) >= TTL:\n\t\t\t\tto_delete.append(p)\n\t\t\n\t\tfor p in to_delete:\n\t\t\tplanes.pop(p)\n\t\t\n\t\tQin.queue.clear()", "def __init__(\n self,\n pin,\n min_pulse_width=500,\n max_pulse_width=2500,\n min_full_sweep_time=0.5,\n rotation_update_freq=25,\n ):\n assert (\n 500 <= min_pulse_width <= 2500\n ), \"min_pulse_width should be between 500 and 2500\"\n assert (\n 500 <= max_pulse_width <= 2500\n ), \"max_pulse_width should be between 500 and 2500\"\n assert (\n min_pulse_width < max_pulse_width\n ), \"min_pulse_width should be less than max_pulse_width\"\n assert (\n min_full_sweep_time > 0\n ), \"min_full_sweep_time should be positive\"\n assert (\n rotation_update_freq > 0\n ), \"rotation_update_freq should be positive\"\n\n self._pin = pin\n self._min_pulse_width = min_pulse_width\n self._max_pulse_width = max_pulse_width\n self._mid_pulse_width = (max_pulse_width + min_pulse_width) / 2\n self._max_pos_change_per_update = 2 / (\n rotation_update_freq * min_full_sweep_time\n )\n self._rotation_update_freq = rotation_update_freq\n self._latest_rotation_start_time = None\n self._position = None\n self._rotation_speed = 0\n self._stopped = False\n\n self._pi = pigpio.pi()\n if not self._pi.connected:\n raise RuntimeError(\"Could not connect to pigpio daemon\")\n self._pi.set_mode(self._pin, pigpio.OUTPUT)\n self._pi.set_servo_pulsewidth(self._pin, 0)", "def nxz(PAxz,PBxz,Npulse,P_times_Dj):\n return PAxz*PBxz*Npulse*P_times_Dj", "def visit_pulseexpr(self, state, channels, t, n_samp, waveform, phase):\n pass", "def magic_sample(self, ys):\n\n #for each non-zero element in y\n #we want to multiply the initial state by HGate(i) SGate(i) HGate(i)\n #this turns out to be equivalent to multiplying the whole final state by\n #U H_k S_k H_k U^\\dagger\n #but H_k S_k H_k = e^{i\\pi/4} \\frac{1}{\\sqrt{2}} (I -i X_k)\n #so now we evolve identity forward by U (trivial)\n #and evolve X_k forward by U (using the AGState)\n #then we have to send the resulting Pauli through UC and UH\n #giving a third Pauli\n #then the state is of the form (we^{i\\pi/4}) UC UH (I + i^d P)/sqrt(2) |s>\n #then we apply Bravyi et al's prop. 
4 to turn this into a new ch form\n \n\n chCopy = deepcopy(self.chState) #we update this copy as we go\n\n for i, y in enumerate(ys):\n if y:\n #we want to know what U_c^\\dagger U X_i U^\\dagger U_c is\n #firstly we use the A-G info\n # U X_i U^\\dagger is the i'th destabiliser\n x = self.agState.x[self.n+i]\n z = self.agState.z[self.n+i]\n r = self.agState.r[self.n+i]\n\n #print(x,z,r)\n x_col = np.array([x]).T\n z_col = np.array([z]).T\n \n #now we apply U_c to this using the CH-form info\n x_mat = chCopy.F * x_col\n z_mat = (chCopy.M * x_col + chCopy.G*z_col) % np.uint8(2)\n r = (r + util.sort_pauli_string(x_mat, z_mat)) % np.uint8(2)\n\n u = (x @ chCopy.F) % np.uint8(2)\n h = (x @ chCopy.M + z @ chCopy.G) % np.uint8(2)\n\n g = (x @ (z + chCopy.g)) % np.uint8(4)\n\n #now U_c^dag U X_i U^dag U_C = (-1)^r i^g prod_j Z_j^{h_j} X_j^{u_j}\n #we want to conjugate this by U_H\n #everywhere chCopy.v == 1 we flip a z to an x and an x to a z\n #everywhere chCopy.v == 1 and u == 1 and h == 1 we need to swap the order of our x and z so we get a minus sign\n\n u2 = u*(np.uint8(1) ^ chCopy.v) ^ (h*chCopy.v)\n h2 = (u*chCopy.v) ^ (h*(np.uint8(1) ^ chCopy.v))\n\n r = (r + (u*h*chCopy.v).sum()) % np.uint8(2)\n \n \n #now U_H^dag U_c^dag U X_i U^dag U_C U_H = (-1)^r i^g prod_j Z_j^{h2_j} X_j^{u2_j}\n\n t = u2 ^ chCopy.s\n r = (r + h2 @ t) % np.uint8(2)\n\n #now we have w UC UH |s> = w (-1)^r (i)^g UC UH |t>\n\n if all(t == chCopy.s):\n chCopy.w *= np.exp(1j*np.pi/4) * (1 + (1j)**(g+2*r -1) )/ np.sqrt(2)\n else:\n phase, VCList, v, s = util.desuperpositionise(chCopy.s, t, (g+2*r -1)%np.uint8(4), chCopy.v)\n\n chCopy.w *= phase*np.exp(1j*np.pi/4)/np.sqrt(2)\n chCopy.v = v\n chCopy.s = s\n\n for gate in VCList:\n gate.rightMultiplyC(chCopy)\n \n return chCopy", "def __band_filter(data: dict, lowFreq: Union[int, float], highFreq: Union[int, float], timestep: int=0,\n samplingFreq: int=240, order: int=5, eegSensor: int=0, filterType: str='bandpass',\n lengthOfTestSeconds: Union[int, float]=32, example: int=0) -> dict:\n #Test\n # Filter.__band_filter_test(data=data, low=lowFreq, high=highFreq, samplingFreq=samplingFreq, order=order,\n # eegSensor=eegSensor, filterType=filterType, lengthOfTestSeconds=lengthOfTestSeconds)\n #Code\n nyq = 0.5 * samplingFreq\n low = lowFreq / nyq\n high = highFreq / nyq\n b, a = signal.butter(order, [low, high], btype=filterType)\n y = signal.lfilter(b, a, data['Signal'])\n ##Graph - This belongs somewhere else probably.\n # t = np.linspace(0, len(data), len(data), endpoint=False)\n # plt.plot(t, y, label='Sensor #' + str(eegSensor) + ' (' + str(lowFreq) + '-' + str(highFreq) + ') Hz')\n # plt.grid(True)\n # plt.axis('tight')\n # plt.xticks(range(10), range(lengthOfTestSeconds)) ##32 seconds per test?\n # plt.xlabel(\"Time in Seconds\")\n # plt.legend(loc='upper left')\n # plt.show()\n output = {}\n timestep = []\n for index, eegChannel in enumerate(y[0]):#the extra [0] is becuase signal.lfilter() puts it in a 1D array. 
Grrr\n timestep.append(eegChannel)\n output['Signal'] = timestep\n Visualization.channelGraph(y[0][0])\n return output #output is 2D 64xTimeSamples", "def jack_SB_func(SB_array, R_array, band_str, N_sample,):\n\tband_id = band.index( band_str )\n\n\tdx_r = np.array(R_array)\n\tdy_sb = np.array(SB_array)\n\n\tn_r = dx_r.shape[1]\n\n\tLen = np.zeros( n_r, dtype = np.float32)\n\tfor nn in range( n_r ):\n\t\ttmp_I = dy_sb[:,nn]\n\t\tidnn = np.isnan(tmp_I)\n\t\tLen[nn] = N_sample - np.sum(idnn)\n\n\tStack_R = np.nanmean(dx_r, axis = 0)\n\tStack_SB = np.nanmean(dy_sb, axis = 0)\n\tstd_Stack_SB = np.nanstd(dy_sb, axis = 0)\n\n\t### only calculate r bins in which sub-sample number larger than one\n\tid_one = Len > 1\n\tStack_R = Stack_R[ id_one ]\n\tStack_SB = Stack_SB[ id_one ]\n\tstd_Stack_SB = std_Stack_SB[ id_one ]\n\tN_img = Len[ id_one ]\n\tjk_Stack_err = np.sqrt(N_img - 1) * std_Stack_SB\n\n\t### limit the radius bin contribution at least 1/3 * N_sample\n\tid_min = N_img >= np.int(N_sample / 3)\n\tlim_r = Stack_R[id_min]\n\tlim_R = np.nanmax(lim_r)\n\n\t## change flux to magnitude\n\tjk_Stack_SB = 22.5 - 2.5 * np.log10(Stack_SB) + mag_add[band_id]\n\tdSB0 = 22.5 - 2.5 * np.log10(Stack_SB + jk_Stack_err) + mag_add[band_id]\n\tdSB1 = 22.5 - 2.5 * np.log10(Stack_SB - jk_Stack_err) + mag_add[band_id]\n\terr0 = jk_Stack_SB - dSB0\n\terr1 = dSB1 - jk_Stack_SB\n\tid_nan = np.isnan(jk_Stack_SB)\n\tjk_Stack_SB, jk_Stack_R = jk_Stack_SB[id_nan == False], Stack_R[id_nan == False]\n\tjk_Stack_err0, jk_Stack_err1 = err0[id_nan == False], err1[id_nan == False]\n\tdSB0, dSB1 = dSB0[id_nan == False], dSB1[id_nan == False]\n\tidx_nan = np.isnan(dSB1)\n\tjk_Stack_err1[idx_nan] = 100.\n\n\treturn jk_Stack_SB, jk_Stack_R, jk_Stack_err0, jk_Stack_err1, Stack_R, Stack_SB, jk_Stack_err, lim_R", "def single_pulse_SCPI(pulsewidth, updown, high_voltage, low_voltage, channel = '1', *args, **kwargs):\n\tif pulsewidth[-2:] not in set({'ns', 'us', 'ms',}):\n\t\tif pulsewidth[-1] != 's':\n\t\t\traise ValueError('pulsewidth ' + str(pulsewidth) + ' not supported')\n\tif updown not in set({'up', 'down'}):\n\t\traise ValueError('updown ' + str(updown) + ' not supported')\n\tif high_voltage[-2:].lower() not in set({'mv'}):\n\t\tif high_voltage[-1].lower() != 'v':\n\t\t\traise ValueError('high_voltage ' + str(high_voltage) + ' not supported')\n\tif low_voltage[-2:].lower() not in set({'mv'}):\n\t\tif low_voltage[-1].lower() != 'v':\n\t\t\traise ValueError('low_voltage ' + str(low_voltage) + ' not supported')\n\tif channel not in set({'1', '2'}):\n\t\traise ValueError('channel ' + str(channel) + ' not supported')\n\t\n\tif updown == 'up':\n\t\tout = 'outp'+channel+':puls:mode sin;'\n\t\tout += ':sour'+channel+':inv off;'\n\t\tout += ':sour'+channel+':volt:lev:imm:high '+high_voltage + ';'\n\t\tout += ':sour'+channel+':volt:lev:imm:low '+low_voltage + ';'\n\t\t#puls1 means the first pulse because we are in single mode\n\t\tout += ':sour'+channel+':puls1:wid '+pulsewidth + ';'\n\t\treturn out\n\telse:\n\t\tout = 'outp'+channel+':puls:mode sin;'\n\t\tout += ':sour'+channel+':inv on;'\n\t\tout += ':sour'+channel+':volt:lev:imm:low '+low_voltage + ';'\n\t\tout += ':sour'+channel+':volt:lev:imm:high '+high_voltage + ';'\n\t\t#puls1 means the first pulse because we are in single mode\n\t\tout += ':sour'+channel+':puls1:wid '+pulsewidth + ';'\n\t\treturn out", "def generate_singlesine(time = 0, samples_nb = 1000, rep_frequency = 10 , pulse_frequency = 50, amplitude = 1 , edge = 1, phase_offset = 0, noise = 0):\r\n\r\n\tif edge not in 
[0,1]:\r\n\t\tprint(colorama.Back.RED + colorama.Style.BRIGHT + \"ERROR: invalid phase (either 0 for a rising or a 1 for a falling edge) , exit.\"+ colorama.Style.NORMAL + colorama.Back.RESET)\r\n\t\t# Return code for error (empty input file):\r\n\t\tsys.exit(10)\r\n\r\n\r\n\t#Creating empty lists for t and y\r\n\tt = np.zeros(samples_nb)\r\n\r\n\tif noise == 0:\r\n\t\ty = np.zeros(samples_nb)\r\n\telse:\r\n\t\ty = np.random.normal(0, noise, samples_nb)\r\n\r\n\t#Determining the interval limits of t\r\n\tt_limit =1/float(rep_frequency*2)\r\n\r\n\t#Updating the t interval\r\n\tt = np.arange(-samples_nb/2,samples_nb/2)/float(samples_nb*rep_frequency) + 1/float(samples_nb*rep_frequency)\r\n\r\n\r\n\t#calculating the time_shift\r\n\t#delta_t = phase_offset/(2*np.pi*pulse_frequency)\r\n\tdelta_t = phase_offset/(2*np.pi*rep_frequency)\r\n\r\n\t#Setting the pulse amplitude\r\n\ta_pulse = amplitude\r\n\tif edge == 1:\r\n\t\ta_pulse *= -1\r\n\r\n\t#Calculating the pulse limits\r\n\tp_limit = 1/float(2*pulse_frequency)\r\n\tp_interval = list ([-p_limit,p_limit])\r\n\r\n\r\n\tfor n in range (0,len(t)) :\r\n\t\tif (t[n] + delta_t) > p_interval[0] and (t[n] + delta_t) <= p_interval[1]:\r\n\t\t\ty[n] += a_pulse * np.sin(2*np.pi*pulse_frequency*(t[n]+delta_t))\r\n\r\n\r\n\r\n\t#plt.plot(t,y)\r\n\t#plt.show()\r\n\r\n\tresult = {}\r\n\tresult ['time'] = time\r\n\tresult ['t'] = t\r\n\tresult ['y'] = y\r\n\r\n\treturn result" ]
[ "0.6036829", "0.5981471", "0.54640365", "0.5410211", "0.54006976", "0.53201425", "0.5316566", "0.525516", "0.52416986", "0.52327883", "0.52327174", "0.5224613", "0.51513845", "0.5149857", "0.5139385", "0.50736994", "0.50405174", "0.5040428", "0.50394034", "0.50340354", "0.5015567", "0.5012473", "0.5009376", "0.5004561", "0.49965826", "0.49650756", "0.49550018", "0.49468863", "0.49277592", "0.49078223" ]
0.678524
0
Pulse doppler processing yields a range/doppler data matrix that represents moving target data that's separated from clutter. An estimation of the doppler shift can also be obtained from pulse doppler processing. The FFT is taken across the slow-time (pulse) dimension.
def pulse_doppler(x, window=None, nfft=None):
    [num_pulses, samples_per_pulse] = x.shape

    if nfft is None:
        nfft = num_pulses

    if window is not None:
        Nx = num_pulses
        if callable(window):
            W = window(cp.fft.fftfreq(Nx))
        elif isinstance(window, cp.ndarray):
            if window.shape != (Nx,):
                raise ValueError("window must have the same length as data")
            W = window
        else:
            W = get_window(window, Nx, False)[cp.newaxis]

        pd_dataMatrix = cp.fft.fft(
            cp.multiply(x, cp.tile(W.T, (1, samples_per_pulse))),
            nfft, axis=0)
    else:
        pd_dataMatrix = cp.fft.fft(x, nfft, axis=0)

    return pd_dataMatrix
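For illustration only, and not part of the original dataset row: the sketch below is a minimal, self-contained NumPy version of the same idea, using made-up pulse parameters, showing how an FFT along the slow-time (pulse) axis separates a moving target from stationary clutter and yields a Doppler estimate. In the function above, cp is assumed to be CuPy and get_window a SciPy-style window helper; both are assumptions, since the snippet carries no imports.

import numpy as np

num_pulses, samples_per_pulse = 64, 256
prf = 1000.0                                   # assumed pulse repetition frequency, Hz

# Synthetic slow-time data: stationary clutter in every range bin plus one
# moving target at range bin 100 with a 250 Hz Doppler shift.
slow_time = np.arange(num_pulses) / prf
x = np.ones((num_pulses, samples_per_pulse), dtype=complex)
x[:, 100] += np.exp(2j * np.pi * 250.0 * slow_time)

# Pulse-Doppler processing: FFT along axis 0 (the pulse / slow-time axis).
range_doppler = np.fft.fftshift(np.fft.fft(x, axis=0), axes=0)
doppler_freqs = np.fft.fftshift(np.fft.fftfreq(num_pulses, d=1.0 / prf))

# Clutter stays in the zero-Doppler bin; the target peak gives the Doppler estimate.
spectrum = np.abs(range_doppler[:, 100])
spectrum[doppler_freqs == 0] = 0.0             # ignore the clutter (zero-Doppler) bin
print("estimated Doppler shift:", doppler_freqs[np.argmax(spectrum)], "Hz")

The function above performs the same axis-0 FFT on the GPU; an analogous call, assuming CuPy is installed, would be pulse_doppler(cp.asarray(x)).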
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def generatePulse(self, duration=100, gaussian=False, frequency=12., amplitude=1., phase=0., DelayFromZero=0, useCalibration=False, shape=None, name=None):\n if name is None:\n name = 'self%i' % self.index\n self.index += 1\n self._params[\"pulses\"][name] = dict()\n self._params[\"pulses\"][name][\"frequency\"] = frequency\n self._params[\"pulses\"][name][\"name\"] = name\n if shape is None:\n self._params[\"pulses\"][name][\"shape\"] = None\n self._params[\"pulses\"][name][\"duration\"] = duration\n self._params[\"pulses\"][name][\"gaussian\"] = gaussian\n self._params[\"pulses\"][name][\"amplitude\"] = amplitude\n self._params[\"pulses\"][name][\"phase\"] = phase\n self._params[\"pulses\"][name][\"DelayFromZero\"] = DelayFromZero\n self._params[\"pulses\"][name][\"useCalibration\"] = useCalibration\n else:\n self._params[\"pulses\"][name][\"shape\"] = \"userDefined\"\n\n pulse = numpy.zeros(self.numberOfPoints(), dtype=numpy.complex128)\n if self._params['modulationMode'] == \"IQMixer\":\n MWFrequency = float(self._MWSource.frequency())\n self._MWSource.turnOn()\n IF = frequency - MWFrequency\n try:\n if shape is None:\n if gaussian:\n print 'gaussian pulse is not working !!!'\n pulse = self.gaussianPulse(\n sigma=duration, delay=DelayFromZero, amplitude=amplitude)\n else:\n pulse[DelayFromZero:DelayFromZero + duration] = amplitude\n else:\n pulse[:] = shape[:]\n pulse *= numpy.exp(1.0j * phase) / 2\n # REPLACED BY DV IN FEB 2015 (see line below supressed section)\n # if useCalibration:\n #calibrationParameters=self._mixer.calibrationParameters(f_sb=f_sb, f_c=MWFrequency)\n #self.debugPrint( calibrationParameters)\n #cr = float(calibrationParameters['c'])*exp(1j*float(calibrationParameters['phi']))\n #self.debugPrint( \"f_sb=\",f_sb)\n #sidebandPulse = exp(-1.j*f_sb*2.0*math.pi*(arange(DelayFromZero,DelayFromZero+len(pulse))))+cr*exp(1.j*f_sb*2.0*math.pi*(arange(DelayFromZero,DelayFromZero+len(pulse))))\n # self._AWG.setOffset(self._params[\"AWGChannels\"][0],calibrationParameters['i0'])\n # self._AWG.setOffset(self._params[\"AWGChannels\"][1],calibrationParameters['q0'])\n # else:\n #sidebandPulse = exp(-1.j*2.0*math.pi*f_sb*(arange(DelayFromZero,DelayFromZero+len(pulse))))\n #waveformIQ =exp(-2.0j*pi*IF*(times+float(delay)))\n # self._AWG.setOffset(self._params[\"AWGChannels\"][0],0)\n # self._AWG.setOffset(self._params[\"AWGChannels\"][1],0)\n sidebandPulse = self._mixer.generateSidebandWaveform(\n IF=IF, useCalibIfNone=useCalibration, length=self.numberOfPoints())\n pulse[:] *= sidebandPulse\n except:\n raise\n\n if self._params['modulationMode'] == \"SimpleMixer\":\n self._MWSource.setFrequency(frequency)\n if shape is None:\n if gaussian:\n print 'gaussian pulse is not working !!!'\n print duration, DelayFromZero, amplitude\n pulse = self.gaussianPulse(\n sigma=duration, delay=DelayFromZero, amplitude=amplitude)\n else:\n pulse[DelayFromZero:DelayFromZero + 
duration] = amplitude\n else:\n pulse[:] = shape[:]\n if useCalibration:\n self._AWG.setOffset(\n self._params[\"AWGChannels\"], self._mixer.calibrationParameters())\n if self._params['modulationMode'] == \"InternalModulation\":\n print \"NOT CONFIGURED YET ! DO NOT USE !\"\n self.pulses[name] = [pulse, True]", "def dd_plan(centrefreq, bandwidth, nfreqchan, timeres, lowDM, highDM, min_DM_step=0.02):\n\n DD_plan_array = []\n freqres = bandwidth / float(nfreqchan)\n previous_DM = lowDM\n\n #number of time samples smeared over before moving to next D_dm\n smear_fact = 3.\n\n #Loop until you've made a hit your range max\n D_DM = 0.\n downsample = 1\n while D_DM < round(highDM, 2):\n #calculate the DM where the current time resolution equals the\n #dispersion in a frequency channel (a bit of an overkill)\n\n #Dm smear over a frequency channel\n dm_smear = previous_DM * freqres * 8.3 * 10.**6 / centrefreq**3\n total_smear = math.sqrt(timeres**2 +\n dm_smear**2)\n\n\n D_DM = smear_fact * timeres * centrefreq**3 /\\\n (8.3 * 10.**6 * freqres)\n\n #difference in DM that will double the effective width (eq 6.4 of pulsar handbook)\n #TODO make this more robust\n #DM_step = math.sqrt( (2.*timeres)**2 - timeres**2 )/\\\n # (8.3 * 10**6 * bandwidth / centrefreq**3)\n DM_step = smear_fact * total_smear * centrefreq**3 /\\\n (8.3 * 10.**6 * 0.5 * bandwidth)\n\n\n #round to nearest 0.01\n DM_step = round(DM_step, 2)\n if DM_step < min_DM_step:\n #set DM to 0.01 as a zero DM doesn't make sense\n DM_step = min_DM_step\n\n\n if D_DM > highDM:\n #last one so range from to max\n D_DM = highDM\n #range from last to new\n D_DM = round(D_DM, 2)\n nDM_step = int((D_DM - previous_DM) / DM_step)\n if D_DM > lowDM:\n DD_plan_array.append([ previous_DM, D_DM, DM_step, nDM_step, timeres, downsample ])\n previous_DM = D_DM\n\n #Double time res to account for incoherent dedispersion\n timeres *= 2.\n downsample *= 2\n\n return DD_plan_array", "def preparePulseSequence(self):\n # get carrier frequency\n if self._MWSource is not None:\n carrierFrequency = self.carrierFrequency()\n else:\n carrierFrequency = 0\n\n # Decide to apply or not corrections\n applyCorrectionsArray = [\n pulse.applyCorrections for pulse in self.pulseList]\n applyCorrection = True in applyCorrectionsArray\n\n # Define self.pulseSequenceArray\n self.pulseSequenceArray = zeros(\n self.numberOfPoints(), dtype=numpy.complex128)\n self._offsets = [None, None]\n\n for pulse in self.pulseList:\n if not(pulse.pulseOn):\n continue\n correctedPulseArray = zeros(\n self.numberOfPoints(), dtype=numpy.complex128)\n\n if self._params[\"modulationMode\"] == \"IQMixer\":\n if pulse.frequency is None:\n pulse.frequency = carrierFrequency\n IF = pulse.frequency - carrierFrequency\n # if applyCorrection:\n # calibrationParameters=self._mixer.calibrationParameters(f_sb=pulse.frequency-carrierFrequency, f_c=carrierFrequency)\n # self.debugPrint(calibrationParameters)\n # sidebandPulse=self._mixer.generateSidebandWaveform(f_sb = f_sb,c = calibrationParameters['c'],phi = calibrationParameters['phi'],length=self.numberOfPoints())\n # self._offsets=[calibrationParameters[\"i0\"],calibrationParameters[\"q0\"]]\n # else:\n # sidebandPulse = exp(-1.j*(2.0*math.pi*f_sb*arange(0,self.numberOfPoints())))\n sidebandPulse = self._mixer.generateSidebandWaveform(\n IF=IF, useCalibIfNone=applyCorrection, length=self.numberOfPoints())\n correctedPulseArray[:] = pulse._pulseArray * \\\n sidebandPulse * exp(1.j * pulse.phase)\n\n elif self._params[\"modulationMode\"] == \"SimpleMixer\":\n 
if applyCorrection:\n self._offsets = [self._mixer.calibrationParameters()]\n else:\n self._offsets[0] = 0\n correctedPulseArray[:] = pulse._pulseArray\n\n elif self._params[\"modulationMode\"] == \"InternalModulation\":\n print \"not configured yet\"\n\n elif self._params[\"modulationMode\"] is None:\n if applyCorrection:\n if hasattr(self, \"pulseCorrectionFunction\"):\n correctedPulseArray[:] = self.pulseCorrectionFunction(\n pulse._pulseArray)\n else:\n print self.name(), \": no correction function found for DC pulses\"\n correctedPulseArray[:] = pulse._pulseArray\n else:\n correctedPulseArray[:] = pulse._pulseArray\n else:\n print \"bad modulationMode\"\n\n self.pulseSequenceArray[:] += correctedPulseArray[:]", "def build_pulse_waveform(startper,endper):\r\n mywaveform = numpy.zeros(100, dtype=numpy.int)\r\n if startper > endper:\r\n mywaveform[0:endper]=1\r\n mywaveform[startper:100]=1\r\n else:\r\n mywaveform[startper:endper]=1 \r\n return mywaveform", "def __init__(self, power, T0_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm)\n self.set_time_window_ps(time_window_ps) \n \n GDD = GDD\n TOD = TOD\n \n # from https://www.rp-photonics.com/gaussian_pulses.html\n self.set_AT( np.sqrt(power) * np.exp(-2.77*0.5*self.T_ps**2/(T0_ps**2)) ) # input field (W^0.5) \n if power_is_avg: \n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def velPulse (t,Dp,t1,Tp):\r\n\tdiscretePulse=(Dp*math.pi)/(Tp*float(2))*np.cos(math.pi*(t-t1-Tp/float(2))/float(Tp))\r\n\treturn discretePulse", "def __init__(self, power, FWHM_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm) \n self.set_time_window_ps(time_window_ps)\n\n T0_ps = FWHM_ps/3.7909885\n ### Generate pulse\n if not power_is_avg:\n # numpy.sinc is sin(pi*x)/(pi*x), so we divide by pi\n self.set_AT( np.sqrt(power) * np.sinc(self.T_ps/(T0_ps*np.pi)) ) \n else:\n self.set_AT( 1 / np.sinc(np.pi * self.T_ps/(T0_ps*np.pi)) )\n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n \n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def dispPulse (t,Dp,t1,Tp):\r\n\tdiscretePulse=Dp/float(2)*np.sin(math.pi*(t-t1-Tp/float(2))/float(Tp))+Dp/float(2)\r\n\treturn discretePulse", "def __init__(self, power, T0_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm) \n self.set_time_window_ps(time_window_ps)\n \n ### Generate pulse\n if not power_is_avg:\n # from https://www.rp-photonics.com/sech2_shaped_pulses.html\n self.set_AT( 
np.sqrt(power)/np.cosh(self.T_ps/T0_ps) )\n else:\n self.set_AT( 1 / np.cosh(self.T_ps/T0_ps) )\n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n \n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def _delayandsum2(data, offsets, ifactor2, steeramp, dr, out):\n gridsize, numchannels = offsets.shape\n for gi in nb.prange(gridsize):\n out[gi] = 0\n autopower = 0\n for mi in range(numchannels):\n ind = offsets[gi,mi]\n r = (data[ind,mi] * (1-ifactor2[gi,mi]) \\\n + data[ind+1,mi] * ifactor2[gi,mi]) * steeramp[gi,mi]\n out[gi] += r\n autopower += r*r\n out[gi] = out[gi]*out[gi] - dr * autopower\n if out[gi]<1e-100:\n out[gi] = 1e-100", "def test_dflipflop(self):\n circ = DFlipFlop(size=2)\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n circ.d = 3\n self.assertSigEq(circ.q, 0)\n circ.clk.set()\n self.assertSigEq(circ.q, 3)\n circ.d = 2\n self.assertSigEq(circ.q, 3)", "def gen_pulsed_stimulus(T, dt, odor_idx, pulse_duration=(0.1, 0.5), n_stim=1):\n # randomly sample pulse duration\n pulses = np.random.uniform(pulse_duration[0], pulse_duration[1], size=100)\n pulse_bins = (pulses / dt).astype(np.int).tolist()\n random.shuffle(pulse_bins)\n X = np.zeros((n_stim, int(T / dt)))\n\n # randomly position pulse - use poisson to have more variability in positioning\n n_bins = int(T / dt)\n spaced_pulses = [[1] * pulse_bins[0]]\n start_bins = np.random.randint(0, n_bins-int(0.1/dt), size=200).astype(np.int).tolist()\n #start_bins = (np.random.poisson(int(T * 100), size=10) / 100 / dt).astype(np.int)\n #print(\"start_bins: {}\".format(start_bins))\n start_bins = list(filter(lambda p: (p+pulse_bins[0]) < (n_bins-5), start_bins))\n random.shuffle(start_bins)\n #print(\"start_bins: {} | {} sec\".format(start_bins, np.array(start_bins)*dt))\n start_bin = start_bins[0]\n print(\"pulse offset: {}sec | duration: {}sec\".format(start_bin * dt, pulse_bins[0] * dt))\n\n pulse_times = [[] for _ in range(n_stim)]\n for k, s in enumerate(spaced_pulses):\n X[odor_idx, start_bin:start_bin + len(s)] = s\n pulse_times[odor_idx].append(start_bin * dt)\n\n X_prime = np.c_[np.zeros((n_stim, 1)), X]\n return X, [len(np.where(np.diff(X_prime[n, :]) == 1)[0]) for n in range(n_stim)], pulse_times", "def __init__(self, time_window_ps, center_wavelength_nm, power,frep_MHz = 100., NPTS = 2**10,\n power_is_avg = False,\n fileloc = '',\n flip_phase = True):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n try:\n self.fileloc = fileloc\n # make sure we weren't passed mks units\n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 )\n self.set_time_window_ps(time_window_ps)\n self.set_center_wavelength_nm(center_wavelength_nm) # reference wavelength (nm) \n \n # power -> EPP\n if power_is_avg:\n power = power / self.frep_mks\n \n # Read in retrieved FROG trace\n frog_data = np.genfromtxt(self.fileloc)\n wavelengths = frog_data[:,0]# (nm)\n intensity = frog_data[:,1]# (arb. 
units)\n phase = frog_data[:,2]# (radians)\n\n if flip_phase:\n phase = -1 * phase\n \n pulse_envelope = interp1d(wavelengths, intensity, kind='linear',\n bounds_error=False,fill_value=0)\n phase_envelope = interp1d(wavelengths, phase, kind='linear', \n bounds_error=False,fill_value=0)\n \n gridded_intensity = pulse_envelope(self.wl_nm)\n gridded_phase = phase_envelope(self.wl_nm)\n\n # Calculate time domain complex electric field A\n self.set_AW(gridded_intensity*np.exp(1j*gridded_phase))\n # Calculate normalization factor to achieve requested \n # pulse energy\n e_scale = np.sqrt(power / self.calc_epp() )\n self.set_AT(self.AT * e_scale )\n\n except IOError:\n print ('File not found.' )", "def captured_signal(waveform, shift, p):\n return time_varying_delay(waveform, shift, p)", "def __demodulate(self,samples= None,doppEst=0,plotResults = False):\n\n doppShift = self.dopplerIdxlast # found in the Doppler search\n # the signal still resides in the GPU buffer and still be fft'd\n # Fine tune the frequency offset\n\n ts = time.time()\n \n self.complexShiftMulMasks.prepared_call(self.gShapeVecMasks2,\n self.bShapeVecMasks2,\n self.GPU_bufXcorr,\n self.GPU_bufSignalFreq,\n self.GPU_bufBitsMask,\n np.int32(doppShift))\n\n\n # Ifft the data in one batch\n cufft.cufftExecC2C(self.fftPlanDemod,int(self.GPU_bufXcorr),int(self.GPU_bufXcorr),cufft.CUFFT_INVERSE)\n if log.level == logging.DEBUG:\n log.debug(f'Time demodulate FFT {time.time()-ts} s')\n \n\n tC = time.time()\n spSym,codeOffset = self.findCodeRateAndPhaseGPU()\n if log.level == logging.DEBUG:\n log.debug(f'Time demodulate findCodeRateAndPhase {time.time()-tC} s')\n\n \n ops = Operations.CENTRES_ABS # the operation to do on the matched filter.\n\n ## Decoding:\n if log.level == logging.DEBUG:\n log.debug('Time demodulate %f',time.time() - tC)\n tA = time.time()\n # t = time.time()\n idxSymbol, amplitudes, centres, symbols2, amplitudes2, trustSymbol = self.cudaFindCentres(spSym,codeOffset,ops)\n # print('time spent on centre search {}'.format(time.time()-t))\n # tb = time.time()\n dataBits, symError_t = self.extractBits(centres,idxSymbol)\n noError = len(symError_t)\n \n ## Remove the overlapping parts - up to 1 ms\n tSO = time.time()\n centresWin,dataBitsWin,trustSymbolWin, idxSymbolWin = self.checkSymbolOverlap(noError,centres,idxSymbol,dataBits,trustSymbol)\n if log.level <= logging.DEBUG:\n log.debug('Time demodulate overlap time {}'.format(time.time()-tSO))\n\n tP = time.time()\n\n ## tag the interference on the bits - less than 100 us\n if len(self.clippedPeakIPure) > 0:\n cPSpan = self.clippedPeakSpan # This is the distance in symbols to where we assume a clipped peak can affect a bit\n clippedPeaks = self.clippedPeakIPure[self.clippedPeakIPure>centresWin[0]-cPSpan*spSym] # remove the first peaks that are out of the window\n if len(clippedPeaks) > 0: \n clippedPeaks = clippedPeaks[clippedPeaks < centresWin[-1]+cPSpan*spSym] # cut of the end that is out of the window\n else:\n clippedPeaks = []\n\n if log.level == logging.DEBUG:\n log.debug('Time demodulate find peaks %f',time.time()-tP)\n t = time.time()\n\n # tag clipped peaks in the trust\n pp = np.zeros(self.Nfft,dtype = np.bool)\n spSymc = int(np.ceil(spSym))\n for cp in self.clippedPeakIPure:\n pp[cp-2*spSymc:cp+2*spSymc+1] = 1\n\n idxVal = pp[centresWin]\n trustSymbolWin[idxVal] = -2\n if log.level == logging.DEBUG:\n log.debug('Time demodulate peaks %f new ',time.time()-t)\n\n if STORE_BITS_IN_FILE is True:\n tstore = time.time()\n self.all_bits = 
np.append(self.all_bits,dataBitsWin.astype(DATATYPE))\n self.all_trust = np.append(self.all_trust,trustSymbolWin)\n self.frames = np.append(self.frames,len(self.all_bits))\n self.code_rate = np.append(self.code_rate, spSym)\n self.code_phase = np.append(self.code_phase, codeOffset)\n\n xcorrResS = np.empty((self.num_masks,self.Nfft),dtype=np.complex64)\n cuda.memcpy_dtoh(xcorrResS,self.GPU_bufXcorr)\n xcorrResS = np.expand_dims(xcorrResS,0)\n self.xcorrOut.append(xcorrResS)\n \n np.savez(BITS_FNAME,all_bits=self.all_bits,all_trust=self.all_trust, frames=self.frames,doppMatch = self.sum_match, code_rate = self.code_rate, code_phase = self.code_phase,masks = self.masks)\n if log.level == logging.DEBUG:\n log.debug(f'demodulator store in file 2 (save to file) time {time.time()-tstore} s')\n # Phase windup computations not done at the moment\n # return dataBitsWin, centres, idxSymbolWin,idxSymbolPreWin,idxSymbolPostWin, trustSymbolWin, amplitudes ,spSym\n return dataBitsWin.astype(np.uint8), centresWin.astype(np.uint8), trustSymbolWin.astype(np.uint8), spSym", "def accPulse (t,Dp,t1,Tp):\r\n\tdiscretePulse=-(Dp*math.pi**2)/(float(2)*Tp**2)*np.sin(math.pi*(t-t1-Tp/float(2))/float(Tp))\r\n\treturn discretePulse/float(981)", "def get_doppler(self, iq_samples):\n ## print('-------------------- get_doppler --------------------', self._frame_counter,len(iq_samples))\n success,doppler = False,0\n if len(iq_samples) == 0:\n return success,doppler\n\n sps = self._sps\n zp = np.array([x for x in self._preamble['symb'][9:40]\n for _ in range(sps)], dtype=np.complex64)\n cc = np.correlate(iq_samples, zp)\n imax = np.argmax(np.abs(cc[0:18*sps]))\n pks = cc[(imax,imax+31*sps),]\n tpks = cc[imax+15*sps:imax+16*sps]\n ## print('doppler: ', np.abs(pks), np.abs(tpks))\n success = np.mean(np.abs(pks)) > 5*np.mean(np.abs(tpks))\n doppler = np.diff(np.unwrap(np.angle(pks)))[0]/31/self._sps if success else 0\n return success,doppler", "def calculate_pulse(processed_video, recorded_time, show_processed_image):\n if show_processed_image:\n processed_video = extract_red_values(processed_video, show_processed_image)\n processed_video = np.asarray(processed_video, dtype=np.float32)\n red_values = []\n for i in range(0, processed_video.shape[0]):\n img = processed_video[i]\n red_intensity = np.mean(img)\n red_values.append(red_intensity)\n peaks, _ = find_peaks(red_values)\n pulse = (len(peaks) / float(recorded_time)) * 60\n pulse = np.int16(pulse)\n rospy.loginfo(\"[EulerianMotionMagnification] Pulse: \" + str(pulse))\n return pulse, red_values", "def prep(self, deleteraw=False):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n if deleteraw:\n del self.rawdata\n del self.flags\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n self.delay = {}\n for dmbin in 
xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n (trackt, trackc) = self.dmtrack0[dmbin]\n if len(trackc)<len(self.chans):\n print 'Computed track for DM=%.1f is too long for the observation; only %d channels are computed' % (self.dmarr[dmbin],len(trackc))\n continue\n \n# old way\n# self.twidths[dmbin] = [len(n.where(trackc == (chan-self.chans[0]))[0]) for chan in self.chans] # width of track for each unflagged channel\n# self.delay[dmbin] = [n.int(trackt[n.where(trackc == (chan-self.chans[0]))[0][0]]) for chan in self.chans] # integration delay for each unflagged channel of a given dm.\n# new way\n\n self.twidths[dmbin] = [len(n.where(n.array(trackc) == chan)[0]) for chan in range(len(self.chans))] # width of track for each unflagged channel\n self.delay[dmbin] = [n.int(trackt[n.where(n.array(trackc) == chan)[0][0]]) for chan in range(len(self.chans))] # integration delay for each unflagged channel of a given dm.\n\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, max(twidth)=%d. Iteration could step by %d/2.' % (self.dmarr[dmbin], max(self.twidths[dmbin]), max(self.twidths[dmbin]))", "def pdm_py(time, signal, f0=None, fn=None, df=None, Nbin=10, Ncover=5, D=0.):\n freq = np.arange(f0,fn+df,df)\n \n Ntime = len(time)\n Nfreq = len(freq)\n \n binsize = 1.0 / Nbin\n covershift = 1.0 / (Nbin * Ncover)\n \n theta = np.zeros(Nfreq)\n \n for i in range(Nfreq):\n \n # Compute the phases in [0,1[ for all time points\n phase = np.fmod((time - time[0]) * freq[i] + D/2.*time**2, 1.0)\n \n # Reset the number of (shifted) bins without datapoints\n \n Nempty = 0\n \n # Loop over all Nbin * Ncover (shifted) bins\n \n for k in range(Nbin):\n for n in range(Ncover):\n \n # Determine the left and right boundary of one such bin\n # Note that due to the modulo, right may be < left. 
So instead\n # of 0-----+++++------1, the bin might be 0++-----------+++1 .\n \n left = np.fmod(k * binsize + n * covershift, 1.0) \n right = np.fmod((k+1) * binsize + n * covershift, 1.0) \n\n # Select all data points in that bin\n \n if (left < right):\n bindata = np.compress((left <= phase) & (phase < right), signal)\n else:\n bindata = np.compress(~((right <= phase) & (phase < left)), signal)\n\n # Compute the contribution of that bin to the theta-statistics \n \n if (len(bindata) != 0):\n theta[i] += (len(bindata) - 1) * bindata.var()\n else:\n Nempty += 1\n \n # Normalize the theta-statistics \n\n theta[i] /= Ncover * Ntime - (Ncover * Nbin - Nempty) \n \n # Normalize the theta-statistics again\n \n theta /= signal.var() \n \n # That's it!\n \n return freq,theta", "def dedisperse(self, dmbin):\n\n dddata = self.data.copy()\n twidth = self.twidths[dmbin]\n delay = self.delay[dmbin]\n\n # dedisperse by rolling time axis for each channel\n for i in xrange(len(self.chans)):\n dddata[:,:,i,:] = n.roll(self.data[:,:,i,:], -delay[i], axis=0)\n\n return dddata", "def shutter_pulse(self, width):\n step_name = 'Shutter Pulse'\n self.shutter.settings['shutter_open'] = True\n self.db_poll(step_name)\n print('Shutter open')\n t0 = time.time()\n t_lastlog = t0\n while True:\n if self.interrupt_measurement_called:\n self.shutter.settings['shutter_open'] = False\n break\n if time.time()-t0 > width:\n break\n time.sleep(0.001)\n if time.time() - t_lastlog > 0.2:\n # do some logging\n self.db_poll(step_name)\n t_lastlog = time.time()\n \n self.shutter.settings['shutter_open'] = False\n self.settings['steps_taken'] += 1\n print('Shutter closed')", "def FD_shift(self, signal, FD_params):\n #freq in MHz, delays in milliseconds\n freq_array = signal._dat_freq\n # define the reference frequency\n ref_freq = make_quant(1000.0, 'MHz')\n # calculate the delay added in for the parameters\n time_delays = make_quant(np.zeros(len(freq_array)), 'ms') # will be in seconds\n for ii in range(len(FD_params)):\n time_delays += np.double(make_quant(FD_params[ii], 's').to('ms') * \\\n np.power(np.log(freq_array/ref_freq),ii+1)) # will be in seconds\n\n if signal.delay==None:\n signal._delay=time_delays\n else:\n signal._delay += time_delays\n # get time shift based on the sample rate\n shift_dt = (1/signal._samprate).to('ms')\n shift_start = time.time()\n # check if there are less than 20 frequency channels\n if signal.Nchan <= 20:\n div_fac = 1\n else:\n div_fac = 20\n\n for ii, freq in enumerate(freq_array):\n signal._data[ii,:] = shift_t(signal._data[ii,:],\n time_delays[ii].value,\n dt=shift_dt.value)\n if (ii+1) % int(signal.Nchan//div_fac) ==0:\n shift_check = time.time()\n percent = round((ii + 1)*100/signal.Nchan)\n elapsed = shift_check-shift_start\n chk_str = '\\r{0:2.0f}% shifted'.format(percent)\n chk_str += ' in {0:4.3f} seconds.'.format(elapsed)\n\n try:\n print(chk_str , end='', flush=True)\n #This is the Python 2 version\n #__future__ does not have 'flush' kwarg.\n except:\n print(chk_str , end='')\n sys.stdout.flush()\n\n # May need to add tihs parameter to signal\n signal._FDshifted = True", "def pdm(times, signal,f0=None,fn=None,df=None,Nbin=5,Ncover=2,\n D=0,forbit=None,asini=None,e=None,omega=None,nmax=10):\n T = times.ptp()\n n = len(times)\n \n #-- initialize variables\n xvar = signal.std()**2.\n xx = (n-1) * xvar\n nf = int((fn-f0) / df + 0.001) + 1\n f1 = np.zeros(nf,'d')\n s1 = np.zeros(nf,'d')\n \n #-- use Fortran subroutine\n #-- Normal PDM\n if D is None and asini is None:\n f1, s1 = 
pyscargle.justel(signal,times,f0,df,Nbin,Ncover,xvar,xx,f1,s1,n,nf)\n #-- PDM with linear frequency shift\n elif asini is None:\n f1, s1 = pyscargle.justel2(signal,times,f0,df,Nbin,Ncover,xvar,xx,D,f1,s1,n,nf)\n #-- PDM with circular binary orbit\n elif asini is not None and (e is None or e==0):\n f1, s1 = pyscargle.justel3(signal,times,f0,df,Nbin,Ncover,xvar,xx,asini,\n forbit,f1,s1,n,nf)\n #-- PDM with eccentric binary orbit\n elif e>0:\n forbit = 2*pi*forbit\n ans,bns = np.array([[__ane__(n,e),__bne__(n,e)] for n in range(1,nmax+1)]).T\n ksins = np.sqrt(ans**2*np.cos(omega)**2+bns**2*np.sin(omega)**2)\n thns = np.arctan(bns/ans*np.tan(omega))\n tau = -np.sum(bns*np.sin(omega))\n f1, s1 = pyscargle.justel4(signal,times,f0,df,Nbin,Ncover,xvar,xx,asini,\n forbit,e,omega,ksins,thns,tau,f1,s1,n,nf,nmax)\n \n \n #-- it is possible that the first computed value is a none-variable\n if not s1[0]: s1[0] = 1. \n \n return f1, s1", "def sendPulse(self, forceSend=False, name=None, markersName=None, outputName=None):\n pulse = numpy.zeros(self.numberOfPoints(), dtype=numpy.complex128)\n if name is not None:\n for k in self.pulses.keys():\n self.pulses[k][1] = False\n self.pulses[name][1] = True\n values = self.pulses.values()\n for value in values:\n if value[1]:\n pulse += value[0]\n\n markers = None # section marker added by DV + KJ 09/2013\n if markersName is not None:\n markers = numpy.zeros(self.numberOfPoints(), dtype=int8)\n if markersName != 'All':\n for k in self.markersDict.keys():\n self.markersDict[k][1] = False\n self.markersDict[markersName][1] = True\n values = self.markersDict.values() # section marker added by DV + KJ 09/2013\n for value in values:\n if value[1]:\n markers += value[0]\n if outputName is None:\n outputName = self.name()\n\n if forceSend or True:\n if self._params['modulationMode'] == \"IQMixer\":\n markers = (zeros(self.numberOfPoints(), dtype=int8),\n zeros(self.numberOfPoints(), dtype=int8))\n markers[0][:self.readoutDelay()] = 3\n markers[1][:self.readoutDelay()] = 3\n self._AWG.loadiqWaveform(iqWaveform=pulse, channels=self._params[\n \"AWGChannels\"], waveformNames=(outputName + 'i', outputName + 'q'), markers=markers)\n elif self._params['modulationMode'] == \"SimpleMixer\":\n # print \"sending pulse\"\n if self._formGeneratorType == 'AWG':\n self._AWG.loadRealWaveform(pulse, channel=self._params[\n 'AWGChannels'], markers=markers, waveformName=outputName) # Markers=None added by Kiddi 17/09/2013\n self._AWG.startAllChannels()\n self._AWG.run()\n if self._formGeneratorType == 'AFG':\n print \"need to be tried before use (pulse_generator.py)\"\n self._AWG.writeWaveform(name=outputName, waveform=pulse)\n self._AWG.turnOn()\n elif self._params['modulationMode'] == \"InternalModulation\":\n print \"NOT CONFIGURED YET ! 
DO NOT USE !\"\n else:\n print \"self._params['modulationMode'] not correctly set\"", "def pulse(amplitude, onsets, width, t_stop, baseline=0.0):\n times = [0]\n amps = [baseline]\n for onset in onsets:\n times += [onset, onset + width]\n amps += [amplitude, baseline]\n times += [t_stop]\n amps += [baseline]\n return np.array(times), np.array(amps)", "def sendPulseSequence(self, outputName=None):\n if outputName is None:\n outputName = str(self.name())\n if self._params['modulationMode'] == \"IQMixer\":\n if self.markersList1 == ():\n self.markerArray1[:10000] = 3\n if self.markersList2 == ():\n self.markerArray2[:10000] = 3\n markers = (self.markerArray1, self.markerArray2) # NOT WRITTEN YET\n # markers[0][:10000]=3 #################### NOT WRITTEN YET\n # markers[1][:10000]=3 #################### NOT WRITTEN YET\n self._AWG.loadComplexWaveforms(complexWaveform=self.pulseSequenceArray, channels=self._params[\n \"AWGChannels\"], waveformNames=(outputName + 'i', outputName + 'q'), markers=markers) # NOT WRITTEN YET\n if self._offsets[0] is not None:\n self._AWG.setOffset(self._params[\"AWGChannels\"][\n 0], self._offsets[0])\n if self._offsets[1] is not None:\n self._AWG.setOffset(self._params[\"AWGChannels\"][\n 1], self._offsets[1])\n elif self._params['modulationMode'] == \"SimpleMixer\":\n if self._formGeneratorType == 'AWG':\n if self.markersList1 == ():\n self.markerArray1[:10000] = 3\n markers = (self.markerArray1, self.markerArray1)\n # self._AWG.loadComplexWaveforms(complexWaveform=self.pulseSequenceArray,channels=(3,3),waveformNames=(outputName+'i',outputName+'q'),markers=markers)\n self._AWG.loadRealWaveform(self.pulseSequenceArray, channel=self._params[\n 'AWGChannels'], markers=markers[0], waveformName=outputName)\n # self._AWG.setOffset(self._params[\"AWGChannels\"],self._offsets[0])\n self._AWG.startAllChannels()\n self._AWG.run()\n if self._formGeneratorType == 'AFG':\n print \"need to be tried before use (pulse_generator.py)\"\n self._AWG.writeWaveform(name=outputName, waveform=pulse)\n self._AWG.setOffset(self._params[\"AWGChannels\"][\n 0], self._offsets[0])\n self._AWG.turnOn()\n elif self._params['modulationMode'] is None:\n if self.markersList1 == ():\n self.markerArray1[:10000] = 3\n markers = (self.markerArray1, self.markerArray1)\n self._AWG.loadRealWaveform(self.pulseSequenceArray, channel=self._params[\n 'AWGChannels'][0], markers=markers[0], waveformName=outputName)\n elif self._params['modulationMode'] == \"InternalModulation\":\n print \"NOT CONFIGURED YET ! DO NOT USE !\"\n else:\n pass\n # This is the valid sender to the hardware apparatus\n print \"self._params['modulationMode'] not correctly set: \", self._params['modulationMode']", "def Signal_Ideal_Delay(signal,d = 2):\t\t\t\t\t\t\t\t\t# Function to generate ideal delay in signal\n\ts = signal.shape[0]\n\ttime = np.arange(+d,s+d)\n\t\n\treturn signal,time", "def get_pad_data(self, pad_pulse_length):\n pad_pulse_data = []\n for k in range(3):\n for i in range(len(self.pulse_data[k])):\n if self.pulse_data[k][i].shape[0] < pad_pulse_length:\n padded = np.pad(self.pulse_data[k][i],\n (pad_pulse_length - self.pulse_data[k][i].shape[0], 0)).reshape(1, -1)\n # print(padded)\n else:\n padded = self.pulse_data[k][i][(-1)\n * pad_pulse_length:].reshape(1, -1)\n if k == 0 and i == 0:\n pad_pulse_data = padded\n else:\n pad_pulse_data = np.append(pad_pulse_data, padded, axis=0)\n # print(e)\n\n self.pad_pulse_data = pad_pulse_data\n # print(self.pad_pulse_data.shape)\n return np.array(pad_pulse_data)" ]
[ "0.60852396", "0.5998875", "0.59345144", "0.5842551", "0.58012646", "0.56403846", "0.5631913", "0.56271863", "0.5620649", "0.5618581", "0.5582362", "0.55823", "0.5566295", "0.5546302", "0.5522001", "0.5505924", "0.54939055", "0.5484252", "0.54836947", "0.54045796", "0.53633", "0.53605306", "0.53185225", "0.53184795", "0.52899593", "0.5289484", "0.5286075", "0.5272383", "0.526597", "0.52649456" ]
0.7112541
0
Verifies we can add a new tag to a VM and not modify an existing tag on that resource
def test_add_or_update_single_tag(self): p = self.load_policy({ 'name': 'test-azure-tag', 'resource': 'azure.vm', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'cctestvm'} ], 'actions': [ {'type': 'tag', 'tag': 'tag1', 'value': 'value1'} ], }) p.run() # verify that the a new tag is added without modifying existing tags s = Session() client = s.client('azure.mgmt.compute.ComputeManagementClient') vm = client.virtual_machines.get('test_vm', 'cctestvm') self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})", "def test_add_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_add_tag_invalid(self):\r\n\r\n with app.test_client() as client: \r\n d = {\"name\": \"\"}\r\n resp = client.post(\"/tags/new\", data=d, follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Please enter tag name\", html)", "def test_create_tag_with_invalid_details_invalid(self):\n\n payload = {\n 'name': ''\n }\n\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 
'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAGS_URL,payload)\n\n # exist = Tag.objects.filter(\n # user = self.user,\n # name = payload['name']\n # ).exist()\n # self.assertFalse(exist)\n self.assertEqual(res.status_code , status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAG_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n tag_data = {'name': ''}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_edit_tag_invalid(self):\r\n\r\n with app.test_client() as client: \r\n d = {\"name\": \"\"}\r\n resp = client.post(f\"/tags/{self.tag.id}/edit\", data=d, follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Please enter tag name\", html)", "def test_auto_tag_update_false_noop_for_existing_tag(self, utcnow_mock):\n\n # setup by adding an existing CreatorEmail tag\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'CreatorEmail',\n 'value': 'do-not-modify'},\n ],\n })\n p.run()\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'auto-tag-user',\n 'tag': 'CreatorEmail',\n 'update': False,\n 'days': 10}\n ],\n })\n p.run()\n\n # verify CreatorEmail tag was not modified\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags['CreatorEmail'], 'do-not-modify')", "def test_networking_project_network_tag_put(self):\n pass", "def test_retag_valid_image(self):\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertTrue(alpine.tag(\"demo\", \"rename\"))\n\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertNotIn(\"demo:test\", alpine.tags)", "def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n 
self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})", "def test_not_created_with_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid_payload(self):\n\n tag_payload = {'name': ''}\n response = self.client.post(URL_TAGS, tag_payload)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_add_remove_tag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n self.assertIsNotNone(id)\n before = self.images.find_one({'_id': id})\n self.assertIsNotNone(before)\n # Add a tag a make sure it worked\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertIn('testtag', after['tag'])\n self.assertIn(self.tag, after['tag'])\n # Remove a tag and make sure it worked\n status = self.m.remove_tag(self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertNotIn('testtag', after['tag'])", "def test_create_tag(self):\n\n tag_payload = {'name': 'Test Tag'}\n self.client.post(URL_TAGS, tag_payload)\n\n is_tag_created = Tag.objects.filter(\n user=self.user,\n name=tag_payload['name']\n ).exists()\n\n self.assertTrue(is_tag_created)", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_create_tags_successfull(self):\n payload = {'name': 'Test Tag'}\n self.client.post(TAG_URL, payload)\n exists = Tag.objects.filter(user=self.user, name = payload['name']).exists()\n self.assertTrue(exists)", "def test_create_tag_succesful(self):\n payload = {'name': 'Test tag'}\n res = self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def _tag_volume():\n if dry:\n print('Would tag the new volume.')\n return True\n\n while True:\n # waiting for the volume to be up to tag it\n i = _fetch('vm')\n v = [x for x in i.volumes.all()]\n if len(v) == 0:\n # volumes should actually be already there once the IP is up\n time.sleep(1)\n else:\n for x in v:\n print('Tagging volume ' + x.id + '.')\n _tag_resource(x)\n break", "def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.images.find_one({'_id': id})\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def test_update_task_exceeded_amount_tags(self):\n task_id = util.MOCK_UUID_5\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\"tags\": [\"tag1\", 
\"tag2\", \"tag3\", \"tag4\", \"tag5\", \"tag6\"]}\n )\n result = rv.json()\n expected = {\n \"code\": \"ExceededTagAmount\",\n \"message\": \"Tag quantity exceeded maximum allowed\",\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 400)", "def test_add_remove_withtag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n session = self.m.new_session(self.auth, self.system)\n i = self.query.copy()\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.m.lookup(session, i)\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def test_add_tag_successful(self):\n payload = {'name': 'test tag'}\n self.client.post(TAGS_URL, payload)\n\n # self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)", "def _verify_tags(self):\n for tag in self.tags:\n if tag.lower() in VASP_TAG_LIST:\n continue\n else:\n print((\"Warning: unknown INCAR tag '\" + tag + \"' with value '\" + str(self.tags[tag]) + \"'\"))" ]
[ "0.77649385", "0.7387739", "0.7075966", "0.6656576", "0.65036297", "0.6465619", "0.6458824", "0.6456048", "0.6449689", "0.6442221", "0.6399257", "0.63945055", "0.6361586", "0.63506293", "0.6283683", "0.62317973", "0.6213603", "0.6211985", "0.61786103", "0.6172332", "0.61618245", "0.6127407", "0.6123977", "0.6122753", "0.61127603", "0.61106896", "0.6093558", "0.60760164", "0.6073244", "0.60141414" ]
0.7408799
1
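
The records in this excerpt all exercise Cloud Custodian's Azure tag/untag actions, and their assertions share one invariant: tags are merged and removed without disturbing unrelated keys. As a minimal, self-contained sketch of that merge/removal semantics (written for illustration only, not taken from any record above; the function names are my own), in plain Python:

    def merge_tags(existing, new):
        """Return a tag dict with `new` applied on top of `existing`.

        Keys present only in `existing` are preserved, matching the
        add-or-update behaviour the surrounding records assert.
        """
        merged = dict(existing or {})
        merged.update(new)
        return merged


    def remove_tags(existing, keys):
        """Return a tag dict with `keys` removed.

        Missing keys are ignored, so removing a nonexistent tag is a
        no-op rather than an error.
        """
        return {k: v for k, v in (existing or {}).items() if k not in set(keys)}


    # Example mirroring the assertions in the surrounding records:
    tags = {"testtag": "testvalue"}
    tags = merge_tags(tags, {"tag1": "value1"})       # {'testtag': 'testvalue', 'tag1': 'value1'}
    tags = remove_tags(tags, ["tag1"])                # {'testtag': 'testvalue'}
    tags = remove_tags(tags, ["tag-does-not-exist"])  # unchanged, no KeyError

The last call illustrates the idempotent-removal behaviour that a later record in this excerpt checks explicitly.
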
Adds tags to an empty resource group, then updates one tag and adds a new tag
def test_add_or_update_tags(self): p = self.load_policy({ 'name': 'test-azure-tag', 'resource': 'azure.resourcegroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'test_vm'} ], 'actions': [ {'type': 'tag', 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}}, ], }) p.run() # verify initial tag set s = Session() client = s.client('azure.mgmt.resource.ResourceManagementClient') rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0] self.assertEqual(rg.tags, {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}) p = self.load_policy({ 'name': 'test-azure-tag', 'resource': 'azure.resourcegroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'test_vm'} ], 'actions': [ {'type': 'tag', 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}} ], }) p.run() # verify modified tags rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0] self.assertEqual(rg.tags, {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tags_to_resource(ResourceId=None, Tags=None):\n pass", "def add_tags(ResourceArn=None, Tags=None):\n pass", "def _add_tags(self):\n\n if self.version != 'live':\n return\n\n tags = [t.strip() for t in self.tags_text.split(',')]\n tags = list(set(tags))\n\n for tag_name in tags:\n tag_slug = slugify(tag_name)\n if tag_slug:\n try:\n tag = Tag.objects.get(blog=self.blog, slug=tag_slug)\n except Tag.DoesNotExist:\n tag = Tag( blog = self.blog,\n name = tag_name,\n slug = tag_slug)\n\n tag.increment()\n tag.save()\n\n self.tags.add(tag)", "def tests_ti_document_add_tag(self, request: FixtureRequest):\n super().group_add_tag(request)", "def _merge_tags_into(self, target):\n for tag in self.tags.all():\n if target.tags.filter(tag=tag.tag).exists():\n tag.delete()\n else:\n tag.assignment_group = target\n tag.save()", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})", "def testAddTag(self):\n project = self.session.create_project()\n\n project.add_tag(\"test\")\n self.assertEqual(project.tags, [\"test\"], \"Can add a tag to a project.\")\n\n json_str = project.to_json()\n doc = json.loads(json_str)\n\n self.assertEqual(doc['meta']['tags'], [\"test\"],\n \"JSON representation had correct tags after add_tag().\")\n\n # Try adding the same tag yet again, shouldn't get a duplicate\n with self.assertRaises(ValueError):\n project.add_tag(\"test\")\n\n json_str = project.to_json()\n doc2 = json.loads(json_str)\n\n self.assertEqual(doc2['meta']['tags'], [\"test\"],\n \"JSON document did not end up with duplicate tags.\")", "def add_tagging(self, task_instance):", "def tag_group_system(client, key, systemid, tagname=\"\"):\n import datetime\n tagname += datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\")\n snaplist = client.system.provisioning.snapshot.list_snapshots(key, systemid, {})\n client.system.provisioning.snapshot.addTagToSnapshot(key, snaplist[0].get('id'), tagname)", "def test_add_remove_tag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n self.assertIsNotNone(id)\n before = self.images.find_one({'_id': id})\n self.assertIsNotNone(before)\n # Add a tag a make sure it worked\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertIn('testtag', after['tag'])\n self.assertIn(self.tag, after['tag'])\n # Remove a tag and make sure it worked\n status = self.m.remove_tag(self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertNotIn('testtag', after['tag'])", "def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)", "def add_tags_recursive(self, tags2add: List[str]) -> None:\n self.tags += tags2add\n for data in self._child_data.values():\n data.add_tags_recursive(tags2add)", "def update(self, instance, validated_data):\n if 'tags' in validated_data:\n tags_data = validated_data.pop('tags')\n for tag_data in tags_data:\n instance.tags.add(tag_data)\n\n super(ProjectSerializer, self).update(instance, validated_data)\n 
return instance", "def add_tags(event):\n\n add_tags_from_presets()", "def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})", "async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)", "def test_add_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['tag1', 'tag2', 'Mediterranean_Basin'])\n assert (fc.features[0]['properties']['tags'] ==\n 'Adriatic_Sea;Mediterranean_Basin;tag1;tag2')\n\n self.check_feature(fc.features[0])", "def add(self, tag):\n self.tags[tag.name] = tag", "def AddGroupTags(self, group, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/tags\" %\n (GANETI_RAPI_VERSION, group)), query, None)", "def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value", "def add_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.append(tag)\n self.write_tag_index(list(set(tags)))", "def add_tag(self, tag):\n self.tags.append(tag)", "def AddTagsToResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'rds':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'elasticache':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'ds':\n response = Client.add_tags_to_resource (\n ResourceId = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True", "def add_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags.union(set(tags))\n return cp", "def test_update_resource_group(self):\n pass", "def add_tags(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n self.tags = self.tags.union(tags)", "def test_add_defined_tag_to_bucket(self, test, object_storage, with_or_without_compartment):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"add-defined-tag-to-bucket\",\n 
\"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [{\"type\": \"update\", \"defined_tags\": self.get_defined_tag(\"add_tag\")}],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), \"true\")", "def add_asset_tags(self, tags, asset_tags_uid, tagging_event_uid):\n for tag in tags:\n tag['event_id'] = tagging_event_uid\n schema_tags = schema_asset_tags['properties']['tags']\n validation_errors = self.validate_json(tags, schema_tags)\n\n if len(validation_errors) == 0:\n # TODO: what if query fails\n doc_tags = self._collection_asset_tags.find_one({'uid': asset_tags_uid})\n doc_tags['tags'].extend(tags)\n\n # Takes a asset tag uid key to find one to change and then passes in\n # new tag array\n self._collection_asset_tags.update_one(\n {'uid': asset_tags_uid},\n {'$set': {'tags': doc_tags['tags']}})\n\n doc_tags = self._collection_asset_tags.find_one({'uid': asset_tags_uid})\n self._clean_mongo_ids(doc_tags)\n return doc_tags\n else:\n raise BadDataError(\"Bad data\", validation_errors)\n # return {\"Bad data, error(s) were encountered\": validation_errors}", "def add_tag(request):\n if request.method != 'POST':\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img_id = request.POST['id']\n try:\n img = Image.objects.get(pk=img_id)\n except:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n tag_val = request.POST['tag']\n try:\n tag = tag_utils.TagsFromText(tag_val)[0]\n added = True\n img.tags.add(tag)\n img.save()\n except:\n added = False\n resp = rest.rest_success(request, img_id)\n respJson = json.loads(resp.content)\n respJson['added'] = added\n resp.content = json.dumps(respJson)\n return resp" ]
[ "0.6558311", "0.62224245", "0.61563283", "0.6131066", "0.6068289", "0.60150707", "0.59856796", "0.5970131", "0.5950358", "0.5894891", "0.5828419", "0.5818687", "0.57768595", "0.57686335", "0.575219", "0.5750683", "0.57304066", "0.5720598", "0.5716028", "0.57158905", "0.57109183", "0.5697749", "0.56921875", "0.56789625", "0.5669354", "0.5659561", "0.56083655", "0.56050676", "0.55905586", "0.5584724" ]
0.6548033
1
Verifies we can delete a tag to a VM and not modify an existing tag on that resource
def test_remove_single_tag(self): p = self.load_policy({ 'name': 'test-azure-remove-single-tag', 'resource': 'azure.vm', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'cctestvm'} ], 'actions': [ {'type': 'tag', 'tag': 'tag1', 'value': 'to-delete'} ], }) p.run() # verify the initial tag set s = Session() client = s.client('azure.mgmt.compute.ComputeManagementClient') vm = client.virtual_machines.get('test_vm', 'cctestvm') self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'}) p = self.load_policy({ 'name': 'test-azure-tag', 'resource': 'azure.vm', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'cctestvm'} ], 'actions': [ {'type': 'untag', 'tags': ['tag1']} ], }) p.run() # verify that the a tag is deleted without modifying existing tags vm = client.virtual_machines.get('test_vm', 'cctestvm') self.assertEqual(vm.tags, {'testtag': 'testvalue'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})", "def test_delete_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"/tags/{self.tag.id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Marvel\", html)", "def test_aws_service_api_vm_delete(self):\n pass", "def _destroy_resource(resource):\n global _existing\n if _existing[resource]:\n print('{v} a {r} with id: {i}.'.format(\n v='Would destroy' if dry else 'Destroying',\n r=resource,\n i=_existing[resource].id\n ))\n\n if dry:\n return True\n else:\n try:\n # _existing[resource].delete()\n getattr(_existing[resource], definitions[resource].destroy)()\n\n if resource == 'vm':\n # untag resource in case a UP follow very quickly: the instance,\n # although terminating, still exists for a while\n print('Postfixing tag of instance {} with -terminated'.format(_existing[resource].id))\n _tag_resource(_existing[resource], tags={args.tag: args.role + '-terminated'})\n\n _existing[resource] = None\n\n except AttributeError as e:\n\n if resource == 'vm':\n state = _existing[resource].state['Name']\n if state in ['terminated', 'shutting-down']:\n print('Trying to delete a vm {i} wich is {s}. not an issue.'.format(\n i=_existing[resource].id,\n s=state\n ))\n return True\n\n # all other cases are problems\n traceback.print_exc()\n return False\n\n except Exception as e:\n print('Could not destroy resource {r}, id {i}. 
Reason just below.'.format(\n r=resource,\n i=_existing[resource].id,\n ))\n traceback.print_exc()\n return False\n return True\n else:\n print('Trying to destroy a {r} tagged {k}:{v}, but none found'.format(\n r=resource,\n k=args.tag,\n v=args.role\n ))\n return False", "def test_networking_project_network_tag_delete(self):\n pass", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_delete_image_by_wrong_tag(self, test_image):\n tag = f\"{TEST_IMAGE_NAME}:wrong_tag\"\n assert image_exists(TEST_IMAGE_NAME)\n assert not delete_image(tag, force=True)\n assert image_exists(TEST_IMAGE_NAME)\n\n # now delete using that tag, both tags will be gone because it's the same image.\n build_test_image(tag=tag)\n assert image_exists(TEST_IMAGE_NAME)\n assert image_exists(tag)\n assert delete_image(tag, force=True)\n assert not image_exists(TEST_IMAGE_NAME)\n assert not image_exists(tag)", "def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response", "def test_vault_delete_vault_item(self):\n pass", "def delete_tag(tag):\n tag.destroy()", "def test_remove_defined_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\n \"type\": \"remove-tag\",\n \"defined_tags\": [\"cloud-custodian-test.mark-for-resize\"],\n },\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), None)", "def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]", "def test_aws_service_api_volume_delete(self):\n pass", "def pre_virtual_machine_delete(self, resource_id):\n pass", "def test_del_tag(driver):\n print(\"-\"*80)\n print(\"Test: Deleting a tag\")\n print(\"-\"*80)\n # Select a random receipt\n receipts = driver.find_elements_by_class_name('receipt')\n index_of_random_receipt = random.randint(0, len(receipts)-1)\n e = receipts[index_of_random_receipt]\n\n # Click on the add-tag element\n tags = get_tags(e)\n if not tags:\n add_tag(e, driver)\n tags = get_tags(e)\n\n e_tag = random.choice(e.find_elements_by_class_name('tagValue'))\n tag = e_tag.text\n e_tag.click(); time.sleep(1)\n\n # Receipts DOM might have been deleted or re-drawn, pull it again\n receipts = driver.find_elements_by_class_name('receipt')\n e = receipts[index_of_random_receipt]\n new_tags = get_tags(e)\n removed_tag_ = list(set(tags) - set(new_tags))\n if len(removed_tag_) != 1 or removed_tag_[0] != tag:\n print(\"\"\" Removed tags: {} (Should be only [{}])\"\n \"\"\".format(removed_tag_, tag))\n print(\"\"\"This error might not be your fault. Either my code, or \n the Selenium driver is buggy. 
Report this problem to us. We will \n fix it, but in the mean time make sure the deletion works on UI.\"\"\")\n return -1\n else:\n print(\"Success!!!\")\n print('<>'*40 + '\\n')\n return 0", "def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])", "def pre_virtual_network_delete(self, resource_id):\n pass", "def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAGS_URL,payload)\n\n # exist = Tag.objects.filter(\n # user = self.user,\n # name = payload['name']\n # ).exist()\n # self.assertFalse(exist)\n self.assertEqual(res.status_code , status.HTTP_400_BAD_REQUEST)", "def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})", "def test_reserve_reserve_delete(self, mock_allowed):\n mock_allowed.return_value = None\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.volume_api.attachment_delete(self.context,\n aref)\n mock_allowed.assert_called_once_with(self.context, aref)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.assertEqual(1, len(vref.volume_attachment))", "def test_delete_deployment(self):\n pass", "def test_edit_tag_invalid(self):\r\n\r\n with app.test_client() as client: \r\n d = {\"name\": \"\"}\r\n resp = client.post(f\"/tags/{self.tag.id}/edit\", data=d, follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Please enter tag name\", html)", "def test_ipam_vrfs_delete(self):\n pass", "async def delete(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"aliases deleted\")\n except:\n await ctx.send(\"Alias unsuccessfully deleted\")\n elif not tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"Tag and all aliases deleted\")\n except:\n await ctx.send(\"Tag unsuccessfully deleted\")\n else:\n await ctx.send(\"No Tag with that name found\")", "def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), 
\"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200", "def test_vault_delete_vault_section(self):\n pass", "def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)", "def test_remove_freeform_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\"type\": \"remove-tag\", \"freeform_tags\": [\"Project\"]},\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(resource[\"freeform_tags\"].get(\"Project\"), None)", "def post_virtual_machine_delete(self, resource_id, resource_dict):\n pass" ]
[ "0.7614638", "0.7149751", "0.7064771", "0.7040828", "0.69282234", "0.6877819", "0.6588053", "0.6364861", "0.63523936", "0.6196349", "0.61579126", "0.6140006", "0.6125979", "0.6120203", "0.6086665", "0.60324454", "0.6031246", "0.60080886", "0.60051006", "0.5979031", "0.5945604", "0.5938687", "0.5927817", "0.59192306", "0.59189725", "0.5915854", "0.5898313", "0.58777285", "0.5877536", "0.5870061" ]
0.7371263
1
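
For readers who want to reproduce the same add-without-clobbering update outside of Cloud Custodian, a hedged sketch against the Azure Python SDK follows. It assumes a track-2 azure-mgmt-compute (where the long-running update call is exposed as begin_update) together with azure-identity; the subscription id, resource group, and VM name are placeholders, not values from the records above:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient
    from azure.mgmt.compute.models import VirtualMachineUpdate

    # Placeholder values -- substitute your own subscription and resource names.
    subscription_id = "00000000-0000-0000-0000-000000000000"
    resource_group, vm_name = "test_vm", "cctestvm"

    client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)

    # Read the current tags, merge in the new one, and write the result back,
    # leaving unrelated tags untouched.
    vm = client.virtual_machines.get(resource_group, vm_name)
    merged = dict(vm.tags or {})
    merged["tag1"] = "value1"
    client.virtual_machines.begin_update(
        resource_group, vm_name, VirtualMachineUpdate(tags=merged)
    ).result()
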
Verifies we can delete multiple tags from a resource group without modifying existing tags.
def test_remove_tags(self): p = self.load_policy({ 'name': 'test-azure-tag', 'resource': 'azure.resourcegroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'test_vm'} ], 'actions': [ {'type': 'tag', 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep', 'added-1': 'to-delete', 'added-2': 'to-delete'}}, ], }) p.run() # verify initial tag set s = Session() client = s.client('azure.mgmt.resource.ResourceManagementClient') rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0] self.assertEqual(rg.tags, {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep', 'added-1': 'to-delete', 'added-2': 'to-delete'}) p = self.load_policy({ 'name': 'test-azure-remove-tag', 'resource': 'azure.resourcegroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'test_vm'} ], 'actions': [ {'type': 'untag', 'tags': ['added-1', 'added-2']} ], }) p.run() # verify tags removed and pre-existing tags not removed rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0] self.assertEqual(rg.tags, {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_resource_group(self):\n pass", "def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))", "def test_products_ref_groups_delete(self):\n pass", "def test_delete_groups(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_collection_group(self):\n pass", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def delete_tags(self, resource_ids, tags):\r\n if isinstance(tags, list):\r\n tags = {}.fromkeys(tags, None)\r\n params = {}\r\n self.build_list_params(params, resource_ids, 'ResourceId')\r\n self.build_tag_param_list(params, tags)\r\n return self.get_status('DeleteTags', params, verb='POST')", "def test_delete_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"/tags/{self.tag.id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Marvel\", html)", "def test_has_tagged_trait_remove_buttons(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n for (a, b) in context['tagged_traits_with_xs']:\n self.assertTrue(b)\n for tt in tagged_traits:\n self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tt.pk}))", "def test_has_tagged_trait_remove_buttons(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n for (a, b) in context['tagged_traits_with_xs']:\n self.assertTrue(b)\n for tt in tagged_traits:\n self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tt.pk}))", "def validate_deletions(taglist):\n preexisting_keys = list_of_keys_of(taglist.current_list)\n keys_of_tags_to_delete = unicode_decode_keys(taglist.deletions)\n\n non_existent_key_set = list(set(keys_of_tags_to_delete) - set(preexisting_keys))\n\n if non_existent_key_set:\n raise_validation_error(\n problematic_key_set=non_existent_key_set,\n problem_message=strings['tags.tag_keys_dont_exist_for_deletion'],\n exception_class=InvalidAttemptToModifyTagsError\n )", "def test_forbidden_non_taggers(self):\n phenotype_taggers = Group.objects.get(name='phenotype_taggers')\n self.user.groups.remove(phenotype_taggers)\n response = self.client.get(self.get_url(self.trait.pk))\n self.assertEqual(response.status_code, 403)", "def delete_tags(ResourceArn=None, TagKeys=None):\n pass", "def delete_tags(ResourceArn=None, TagKeys=None):\n pass", "def test_groups_group_ref_delete(self):\n pass", "def test_tags(question):\n assert \"tags\" in question[\"instance\"]\n tags = set(question[\"instance\"][\"tags\"])\n # there should be at least one tag\n assert len(tags) >= 1\n # each tags should be in VALID_TAGS\n assert len(tags - VALID_TAGS) == 0\n # there should be exactly one category-defining tag\n assert len(tags.intersection(CATEGORY_TAGS)) == 1", "def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in 
current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response", "def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)", "def test_networking_project_network_tag_delete(self):\n pass", "def test_delete_multiple_templates_success(self):\n template_id_1 = util.MOCK_UUID_1\n template_id_2 = util.MOCK_UUID_2\n\n rv = TEST_CLIENT.post(\n \"/templates/deletetemplates\", json=[template_id_1, template_id_2]\n )\n result = rv.json()\n\n expected = {\"message\": \"Successfully removed templates\"}\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def test_ipam_vlan_groups_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_delete_link_resources(self):\n g = groups.get_by_name(\"First Group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n self.assertEquals('Delete Group', self.wd.title)\n \n self.submit_form(\"delete_form\")\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you wish to permanently delete this group and specified resources?\", alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def test_api_v1_groups_id_delete(self):\n pass" ]
[ "0.67967415", "0.649529", "0.62559175", "0.6253165", "0.62317085", "0.62317085", "0.6177669", "0.61279553", "0.6117101", "0.60856915", "0.6035331", "0.6035331", "0.60039115", "0.59949845", "0.59554094", "0.59554094", "0.5923172", "0.58898515", "0.5887204", "0.5866109", "0.58563185", "0.58459496", "0.58455426", "0.58277696", "0.58277696", "0.57754284", "0.5762992", "0.5760173", "0.5746677", "0.57384294" ]
0.66661125
1
Verifies attempting to delete a tag that is not on the resource does not throw an error
def test_removal_does_not_raise_on_nonexistent_tag(self): p = self.load_policy({ 'name': 'test-azure-tag', 'resource': 'azure.vm', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'cctestvm'} ], 'actions': [ {'type': 'untag', 'tags': ['tag-does-not-exist']}, ], }) # verify initial tag set is empty s = Session() client = s.client('azure.mgmt.compute.ComputeManagementClient') vm = client.virtual_machines.get('test_vm', 'cctestvm') self.assertEqual(vm.tags, {'testtag': 'testvalue'}) raised = False try: p.run() except KeyError: raised = True # verify no exception raised and no changes to tags on resource self.assertFalse(raised) self.assertEqual(vm.tags, {'testtag': 'testvalue'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"/tags/{self.tag.id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Marvel\", html)", "def _destroy_resource(resource):\n global _existing\n if _existing[resource]:\n print('{v} a {r} with id: {i}.'.format(\n v='Would destroy' if dry else 'Destroying',\n r=resource,\n i=_existing[resource].id\n ))\n\n if dry:\n return True\n else:\n try:\n # _existing[resource].delete()\n getattr(_existing[resource], definitions[resource].destroy)()\n\n if resource == 'vm':\n # untag resource in case a UP follow very quickly: the instance,\n # although terminating, still exists for a while\n print('Postfixing tag of instance {} with -terminated'.format(_existing[resource].id))\n _tag_resource(_existing[resource], tags={args.tag: args.role + '-terminated'})\n\n _existing[resource] = None\n\n except AttributeError as e:\n\n if resource == 'vm':\n state = _existing[resource].state['Name']\n if state in ['terminated', 'shutting-down']:\n print('Trying to delete a vm {i} wich is {s}. not an issue.'.format(\n i=_existing[resource].id,\n s=state\n ))\n return True\n\n # all other cases are problems\n traceback.print_exc()\n return False\n\n except Exception as e:\n print('Could not destroy resource {r}, id {i}. Reason just below.'.format(\n r=resource,\n i=_existing[resource].id,\n ))\n traceback.print_exc()\n return False\n return True\n else:\n print('Trying to destroy a {r} tagged {k}:{v}, but none found'.format(\n r=resource,\n k=args.tag,\n v=args.role\n ))\n return False", "def test_delete_non_existing_resource(self):\n CommonTestCases.admin_token_assert_in(\n self,\n delete_assigned_resource_from_non_existing_resource,\n \"Resource does not exist\"\n )", "def test_networking_project_network_tag_delete(self):\n pass", "def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAGS_URL,payload)\n\n # exist = Tag.objects.filter(\n # user = self.user,\n # name = payload['name']\n # ).exist()\n # self.assertFalse(exist)\n self.assertEqual(res.status_code , status.HTTP_400_BAD_REQUEST)", "def test_before_delete_for_linked_resource(self):\n resource = factories.Resource()\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n with pytest.raises(p.toolkit.ObjectNotFound):\n helpers.call_action(\"resource_show\", id=resource[\"id\"])", "def test_delete_image_by_wrong_tag(self, test_image):\n tag = f\"{TEST_IMAGE_NAME}:wrong_tag\"\n assert image_exists(TEST_IMAGE_NAME)\n assert not delete_image(tag, force=True)\n assert image_exists(TEST_IMAGE_NAME)\n\n # now delete using that tag, both tags will be gone because it's the same image.\n build_test_image(tag=tag)\n assert image_exists(TEST_IMAGE_NAME)\n assert image_exists(tag)\n assert delete_image(tag, force=True)\n assert not image_exists(TEST_IMAGE_NAME)\n assert not image_exists(tag)", "def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response", "def 
test_edit_tag_invalid(self):\r\n\r\n with app.test_client() as client: \r\n d = {\"name\": \"\"}\r\n resp = client.post(f\"/tags/{self.tag.id}/edit\", data=d, follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Please enter tag name\", html)", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate.delete(5)", "def test_create_tag_invalid(self):\n tag_data = {'name': ''}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})", "def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAG_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)", "def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_deletion_fail(self):\n\n # Assert that a RelaxNoPipeError 
occurs when the data pipe does not exist.\n self.assertRaises(RelaxNoPipeError, pipes.delete, 'x')", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def test_not_created_with_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_before_delete(self, create_with_upload):\n name = \"test.txt\"\n resource = create_with_upload(\n \"hello world\",\n name,\n name=name,\n package_id=factories.Dataset()[\"id\"],\n )\n plugin = p.get_plugin(\"cloudstorage\")\n uploader = plugin.get_resource_uploader(resource)\n assert uploader.get_url_from_filename(resource[\"id\"], name)\n\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n assert uploader.get_url_from_filename(resource[\"id\"], name) is None", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate.delete(5)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_validate_delete(client):\n response = client.delete('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_delete__invalid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_delete(None)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_add_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_with_invalid_details_invalid(self):\n\n payload = {\n 'name': ''\n }\n\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)", "def delete_tag(id):\n try:\n if id == None:\n abort(400,'Id is required! 
')\n DeleteTag.run(id)\n except BadRequest as ex:\n return jsonify({'code': '400','message':'Invalid type id.'})\n except NotFound as ex:\n return jsonify({'code': '404','message': 'card not found'})\n except Exception as ex:\n print(type(ex))\n return jsonify({'code': '500','message':'Internal server error.'})\n else:\n return jsonify({'code':'204','message':'There is no answer for this method.'})", "def test_delete_nonexistent_resource_rpc(self, mcg_obj):\n response = mcg_obj.send_rpc_query(\n \"pool_api\", \"delete_namespace_resource\", {\"name\": \"notexisting_resource\"}\n )\n assert \"error\" in response.json()", "def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])" ]
[ "0.7444001", "0.7069921", "0.6784255", "0.6638141", "0.6606541", "0.6576158", "0.65614355", "0.64411354", "0.6368591", "0.63667697", "0.63317627", "0.632806", "0.6313087", "0.6304011", "0.6302712", "0.63016224", "0.62931526", "0.6242308", "0.62377095", "0.62280387", "0.61895764", "0.6181589", "0.6177318", "0.6163013", "0.61620486", "0.6160703", "0.6155374", "0.615075", "0.614387", "0.6135071" ]
0.7281134
1
tests for the temperature converter
def test_temperature(self):
    self.assertEqual(Converter.TemperatureCtoF(50), 122)
    self.assertEqual(Converter.TemperatureCtoF(-50), -58)
    self.assertEqual(Converter.TemperatureFtoC(50), 10)
    self.assertAlmostEqual(Converter.TemperatureFtoC(-50), -45.55, places=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def test_temperatures_value(self):\n self.assertEqual(self.TminValue, 450.0)", "def convert_temperature(self, event):\n try:\n #Compare other unit to one unit(celsius) then compare that unit to celsius\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Celsius\": current_value * 1.0, \"Fahrenheit\": (current_value - 32) / 1.8, \"Kelvin\": current_value - 273.15, \"Reaumur\": current_value / 0.8, \"Rankine\": (current_value - 491.67) / 1.8, \"Newton\": current_value / 0.33, \"Romer\": (current_value - 7.5) / 0.525, \"Delisle\": 100 - current_value * 0.66666667}\n new_value={\"Celsius\": unit_comp[current_unit], \"Fahrenheit\": unit_comp[current_unit] * 1.8 + 32, \"Kelvin\": unit_comp[current_unit] + 273.15, \"Reaumur\": unit_comp[current_unit] * 0.8, \"Rankine\": unit_comp[current_unit] * 1.8 + 491.67, \"Newton\": unit_comp[current_unit] * 0.33, \"Romer\": unit_comp[current_unit] * 0.525 + 7.5, \"Delisle\": (100 - unit_comp[current_unit]) * 1.5}\n printer = \"Value is invalid.\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(new_value[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def test_temperatures_units(self):\n self.assertEqual(str(self.TmaxUnits), 'K')", "def test_temperature_to_imperial(self):\n self.assertEqual(\n 77,\n IMPERIAL_SYSTEM.temperature(77, IMPERIAL_SYSTEM.temperature_unit))\n self.assertEqual(\n 77,\n IMPERIAL_SYSTEM.temperature(25, METRIC_SYSTEM.temperature_unit))", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def test_temperature_to_metric(self):\n self.assertEqual(\n 25,\n METRIC_SYSTEM.temperature(25, METRIC_SYSTEM.temperature_unit))\n self.assertEqual(\n 26.7,\n METRIC_SYSTEM.temperature(80, IMPERIAL_SYSTEM.temperature_unit))", "def test_xyz_to_turbomol_format(self):\n xyz_t1 = converter.xyz_to_turbomol_format(xyz_dict=self.xyz1['dict'])\n xyz_t2 = converter.xyz_to_turbomol_format(xyz_dict=self.xyz2['dict'])\n xyz_t6 = converter.xyz_to_turbomol_format(xyz_dict=self.xyz6['dict'])\n xyz_t6_eht = converter.xyz_to_turbomol_format(xyz_dict=self.xyz6['dict'], charge=0, unpaired=1)\n self.assertEqual(xyz_t1, self.xyz1['Turbomole'])\n self.assertEqual(xyz_t2, self.xyz2['Turbomole'])\n self.assertEqual(xyz_t6, self.xyz6['Turbomole'])\n self.assertEqual(xyz_t6_eht, self.xyz6['Turbomole eht'])", "def test_convert():", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def test_temperature_to_homekit():\n assert temperature_to_homekit(20.46, TEMP_CELSIUS) == 20.5\n 
assert temperature_to_homekit(92.1, TEMP_FAHRENHEIT) == 33.4", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def test_convert_invalid_unit():\n with pytest.raises(ValueError):\n pressure_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)\n\n with pytest.raises(ValueError):\n pressure_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)", "def test_temperature_to_states():\n assert temperature_to_states(20, TEMP_CELSIUS) == 20.0\n assert temperature_to_states(20.2, TEMP_FAHRENHEIT) == 68.5", "def test_temperature_same_unit(self):\n self.assertEqual(\n 5,\n METRIC_SYSTEM.temperature(5,\n METRIC_SYSTEM.temperature_unit))", "def test_temperatures(get_touchmat):\n touchmat = get_touchmat\n\n temperatures = touchmat.temperatures()\n info = touchmat.info()\n check_system_types.check_TemperatureInfoList(temperatures, [info])", "def temperatures():\n global correct_temperature\n C = randint(-2742, 4000)/10\n K = C + 273.15\n F = C * 1.8 + 32\n R = F + 459.67\n all_temperatures = [(C,\" Celcuis\"), (F,\" Fahrenheit\"), (K,\" Kelvin\"), (R,\" Ranken\")]\n T1,T2 = tuple(sample(set(all_temperatures), 2))\n\n # Chance of being a interval question instead\n if choice([True, False]):\n if T1[1] is \" Celcuis\" or T1[1] is \" Kelvin\":\n T2 = (T1[0] * 1.8, choice([\" A Fahrenheit Interval\", \" A Ranken Interval\"]))\n else:\n T2 = (T1[0] / 1.8, choice([\" A Celcuis Interval\", \" A Kelvin Interval\"]))\n\n print(\"\\n\\nThis is a temperature question.\\n\")\n print(\"For an temperature of {:0.2f}{}\\nPlease find its equivanlent value in{}\".format(*T1,T2[1]))\n guess = get_user_input(float)\n\n if check_answer(T2[0], guess, 0.2):\n print(\"correct\")\n correct_temperature += 1\n else:\n print(\"false the answer is {:0.2f}{}\".format(*T2))", "def __correctTemperature(self):\n # Correct Temp\n self.write_byte_data(self.address, 0x0E, 0xA5)\n self.write_byte_data(self.address, 0x0F, 0x96)\n self.write_byte_data(self.address, 0x62, 0x02)\n self.write_byte_data(self.address, 0x0E, 0x00)\n self.write_byte_data(self.address, 0x0F, 0x00)", "def convert_temp(orig_temp, units):\n switcher = {\n 'k': ((orig_temp - 32) * 5.0 / 9.0) + 273.15,\n 'c': (orig_temp - 32) * 5.0 / 9.0,\n 'f': orig_temp\n }\n temperature = switcher.get(units.lower())\n conversion = dict(original=orig_temp, units=units, temperature=temperature)\n return conversion", "def temp_converter(temperature, **kwargs):\n if not isinstance(temperature, float) and not isinstance(temperature, int):\n raise ValueError('Positional argument `temperature` must be a float / int')\n\n if not kwargs:\n raise ValueError('Missing keyword argument!')\n else:\n if 'temp_given_in' not in kwargs:\n raise ValueError('Missing keyword argument `temp_given_in`')\n\n temp_given_in = kwargs['temp_given_in']\n\n if not isinstance(temp_given_in, str):\n raise ValueError('Keyword argument `temp_given_in` must be a str')\n\n if temp_given_in == 'c':\n return temperature * 1.8 + 32\n elif temp_given_in == 'f':\n return (temperature - 32) / 1.8\n else:\n raise ValueError('Temperature type must be `c` or `f`'.format(temp_given_in))", "def test_temperatures_when_data_present(self):\n\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker(temp_data)\n result = tt.temperatures()\n for i in range(0, len(result)):\n self.assertEqual(result[i][0], temp_data[i][0])\n self.assertEqual(result[i][1], temp_data[i][1])", "def 
testFtoCValues(self):\r\n for c,f in self.knownConversionsCtoF:\r\n result = conversions.convertFahrenheittoCelsius(f)\r\n self.assertEqual(c,result)", "def P2T(self):\n # Convert to RJ temperature\n #fac=planck.I2Ta(self.f*1e6,1).value\n fac = planck(self.f*1e6, 1)\n fac=fac/fac[0]\n self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))", "def testKtoFValues(self):\r\n for f,k in self.knownConversionsFtoK:\r\n result = conversions.convertKelvintoFahrenheit(k)\r\n self.assertEqual(f,result)", "def test_dummy(self, data):\r\n source, expected = data\r\n result = self.converter.convert(source)\r\n self.assertUnicodeEquals(result, expected)", "def testFtoC(self):\r\n for integer, numeral in self.ftocvalues:\r\n result = conversions_refactored.convert('Fahrenheit', 'Celsius', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')", "def test_str_temp(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n group_address_state='1/2/3',\n value_type=\"temperature\")\n sensor.sensor_value.payload = DPTArray((0x0c, 0x1a))\n\n self.assertEqual(sensor.resolve_state(), 21.00)\n self.assertEqual(sensor.unit_of_measurement(), \"°C\")\n self.assertEqual(sensor.ha_device_class(), \"temperature\")", "def testCtoFValues(self):\r\n for c,f in self.knownConversionsCtoF:\r\n result = conversions.convertCelsiusToFahrenheit(c)\r\n self.assertEqual(f,result)", "def do_tc(self, arg):\n a = arg.split()\n\n if len(a) >= 1:\n ise.useTemperatureCompensation(int(a[0]))\n\n print(\"\\ttemp. compensation: \" + str(ise.usingTemperatureCompensation()))", "def test_temperature_unknown_unit(self):\n with self.assertRaises(ValueError):\n METRIC_SYSTEM.temperature(5, 'K')" ]
[ "0.64593834", "0.6315767", "0.62554526", "0.61849815", "0.6140915", "0.6105866", "0.6082826", "0.60662615", "0.6046566", "0.60280526", "0.60113657", "0.59822845", "0.59665346", "0.5951075", "0.58979714", "0.587829", "0.5843394", "0.5784856", "0.57688904", "0.5757867", "0.57042634", "0.56893057", "0.5675777", "0.56410784", "0.562611", "0.56223536", "0.56222", "0.55974054", "0.5592725", "0.5591686" ]
0.64599544
0
tests for the measurement converter
def test_measurment(self):
    self.assertEqual(Converter.MeasurmentWorldtoUS(10, "km"), 6.214)
    self.assertEqual(Converter.MeasurmentWorldtoUS(10, "m"), 10.936)
    self.assertEqual(Converter.MeasurmentWorldtoUS(10, "cm"), 0.328)
    self.assertEqual(Converter.MeasurmentWorldtoUS(10, "mm"), 0.394)
    self.assertEqual(Converter.MeasurmentUStoWorld(10, "mi"), 16.093)
    self.assertEqual(Converter.MeasurmentUStoWorld(10, "yd"), 9.144)
    self.assertEqual(Converter.MeasurmentUStoWorld(10, "ft"), 304.8)
    self.assertEqual(Converter.MeasurmentUStoWorld(10, "in"), 254)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def test_convert():", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def convert_input(self, measurements):\n return 0", "def describe_a_library_of_units_converters_that():\n def blows_smoke():\n assert True\n\n def can_convert_psi_to_kpa():\n assert psi2kpa(32) == 220.631712 # 32 PSI == 220.631712 KPa; average car tire pressure\n assert psi2kpa(8.5) == 58.6052985 # 8.5 PSI == 58.6052985 KPa; basketball pressure\n\n # def can_convert_kpa_to_psi():\n # assert kpa2psi(101.325) == 14.695952495133 # KPa => PSI; average air pressure at sea level\n # assert kpa2psi(220.631712) == 31.999932479367043 # KPa => PSI; average car tire pressure\n\n # def can_convert_mpg_to_lp100k():\n # assert mpg2lp100k(40) == 5.8803694563 # miles-per-gallon => liters per 100km\n # assert mpg2lp100k(25) == 9.408591130080001 # miles-per-gallon => liters per 100km\n\n # def can_convert_lp100k_to_mpg():\n # assert lp100k2mpg(9.4) == 25.022895167663442 # liters per 100km => mpg\n # assert lp100k2mpg(5.1) == 46.12063030902673 # liters per 100km => mpg", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def test_convert_invalid_unit():\n with pytest.raises(ValueError):\n pressure_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)\n\n with pytest.raises(ValueError):\n pressure_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)", "def test_convert_amounts(self):\n pass", "def test_temperature_to_metric(self):\n self.assertEqual(\n 25,\n METRIC_SYSTEM.temperature(25, METRIC_SYSTEM.temperature_unit))\n self.assertEqual(\n 26.7,\n METRIC_SYSTEM.temperature(80, IMPERIAL_SYSTEM.temperature_unit))", "def test_length_to_metric(self):\n self.assertEqual(\n 100,\n METRIC_SYSTEM.length(100, METRIC_SYSTEM.length_unit)\n )\n self.assertEqual(\n 8.04672,\n METRIC_SYSTEM.length(5, IMPERIAL_SYSTEM.length_unit)\n )", "def test_unit(self):\n self.assertEqual(DPTSignedRelativeValue.unit, \"\")\n self.assertEqual(DPTPercentV8.unit, \"%\")\n self.assertEqual(DPTValue1Count.unit, \"counter pulses\")", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def test_chart_parsers():", "def test_get_measure_parameters(self):\n pass", "def test_dummy(self, data):\r\n source, expected = data\r\n result = self.converter.convert(source)\r\n self.assertUnicodeEquals(result, expected)", "def test_ms2min(self):\n result = TimeUnit(-50, 'ms', 'min')\n self.assertRaises(ValueError, lambda: result.doconvert())", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def test_hr2sec(self):\n 
result = TimeUnit(-1.7, 'hr', 'sec')\n self.assertRaises(ValueError, lambda: result.doconvert())", "def test_measurement(lasco):\n assert lasco.measurement == \"white-light\"", "def test_unit_conversion_incompatible(self):\n self.orography_cube.units = 'K'\n msg = \"Unable to convert from\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin_instance.process(self.orography_cube)", "def test_inches_invalid_input(self):\n result = inch_to_cm(\"--\")\n self.assertIsNone(result)", "def test_speed(self):\r\n self.assertEqual(Converter.SpeedKMHtoMPH(10), 6.2)\r\n self.assertEqual(Converter.SpeedMPHtoKMH(10), 16.1)", "def test_cast(self):\n dim = Fidelity(\"epoch\", 1, 10)\n with pytest.raises(NotImplementedError):\n dim.cast()", "def test_converter(self, data):\r\n source, expected = data\r\n result = UpcaseConverter().convert(source)\r\n self.assertEquals(result, expected)", "def test_measure_no_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"Measure | 0\\n\")\n assert bb.operations == [{\"modes\": [0], \"op\": \"Measure\"}]", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def test_native_measurements(self, valkmusa, meas):\n\n QB1 = valkmusa.qubits[0]\n valkmusa.validate_operation(meas(QB1))", "def test_unit_conversion(self):\n self.cube_uv_down.convert_units(\"kW m-2\")\n scale_factor = 1.0\n expected = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=0.1\n )\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def test_canConvert(string, cast, expected):\n assert canConvert(string, cast) == expected", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")" ]
[ "0.6973871", "0.69157994", "0.67782086", "0.63163084", "0.6262526", "0.61533237", "0.61193854", "0.6063204", "0.6003036", "0.60023606", "0.59685755", "0.58939403", "0.58698696", "0.5863226", "0.5838598", "0.58297575", "0.5821282", "0.58060056", "0.5742862", "0.57288164", "0.5701122", "0.5672144", "0.56264275", "0.558963", "0.55837756", "0.5574937", "0.5551992", "0.55289793", "0.5528802", "0.54865235" ]
0.7210032
0
test for the speed converter
def test_speed(self):
    self.assertEqual(Converter.SpeedKMHtoMPH(10), 6.2)
    self.assertEqual(Converter.SpeedMPHtoKMH(10), 16.1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_speed(self, value):\n if value & SPEED_ACTIVE:\n return None\n else:\n return value & SPEED_MASK", "def speed(self) -> int:", "def speed(self) -> int:", "def speed(self, s=0):", "def convert_speed(self, event):\n try:\n #Compare other unit to one unit(meters/second)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Mach number\": 340.2933, \"Nm/24hr\": 0.021435, \"centimeters/minute\": 0.000167, \"centimeters/second\": 0.01, \"feet/hour\": 8.5e-05, \"feet/minute\": 0.00508, \"feet/second\": 0.3048, \"inches/minute\": 0.000423, \"inches/second\": 0.0254, \"kilometers/hour\": 0.277778, \"kilometers/second\": 1000.0, \"knots\": 0.514444, \"meters/hour\": 0.000278, \"meters/minute\": 0.016667, \"meters/second\": 1.0, \"miles/hour\": 0.44704, \"miles/minute\": 26.8224, \"miles/second\": 1609.344, \"nautical miles/hour\": 0.514444, \"speed of light\": 299790000.0, \"speed of sound\": 343.0, \"yards/hour\": 0.000254, \"yards/minute\": 0.01524, \"yards/second\": 0.9144}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def speed(self, speed: int, time: int = 0, /) -> None:", "def speed(self, value: int, /) -> None:", "def speed(self):\n return 1 # speed system not implemented yet", "def check_speed(self, vals: dict) -> None:\r\n if vals['bools']['hyper']:\r\n self._speed = self._dir * self._hyper_speed\r\n elif vals['bools']['fast']:\r\n self._speed = self._dir * self._fast_speed\r\n else:\r\n self._speed = self._dir * self._normal_speed", "def _is_valid_interface_speed(speed):\n if 'ten' in speed:\n speed = \"tengigabitethernet\"\n return True\n elif 'gig' in speed:\n speed = \"gigabitEthernet\"\n return True\n elif 'for' in speed:\n speed = \"fortyGigabitEthernet\"\n return True\n elif 'hun' in speed:\n speed = \"hundredGigabitEthernet\"\n return True\n else:\n LOG.error(_LE(\"_is_valid_interface_speed:invalid speed parameter %s\"\n \" configure valid speed\"), speed)\n return False", "def set_speed():\n pass", "def get_speed(val):\n if val in ['', 255]:\n return None\n return speed(val, 'KMH').value('KT')", "def test_str_speed_ms(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n group_address_state='1/2/3',\n value_type=\"speed_ms\")\n sensor.sensor_value.payload = DPTArray((0x00, 0x1b,))\n\n self.assertEqual(sensor.resolve_state(), 0.27)\n self.assertEqual(sensor.unit_of_measurement(), \"m/s\")\n self.assertEqual(sensor.ha_device_class(), None)", "def _validate_speed(self, speed: pint.Quantity | None) -> str:\n # Validated speeds are used as command argument, with empty string being the default for None\n if speed is None:\n return \"\"\n\n # Alert if out of bounds but don't raise exceptions, according to general philosophy.\n # Target flow rate too high\n if speed < ureg.Quantity(\"2 sec/stroke\"):\n speed = ureg.Quantity(\"2 sec/stroke\")\n warnings.warn(\n f\"Desired speed ({speed}) is unachievable!\"\n f\"Set to {self._seconds_per_stroke_to_flowrate(speed)}\"\n f\"Wrong units? 
A bigger syringe is needed?\"\n )\n\n # Target flow rate too low\n if speed > ureg.Quantity(\"3692 sec/stroke\"):\n speed = ureg.Quantity(\"3692 sec/stroke\")\n warnings.warn(\n f\"Desired speed ({speed}) is unachievable!\"\n f\"Set to {self._seconds_per_stroke_to_flowrate(speed)}\"\n f\"Wrong units? A smaller syringe is needed?\"\n )\n\n return str(round(speed.m_as(\"sec / stroke\")))", "def GetSpeed(self):\n pass", "def speed_converter(speed, **kwargs):\n if not isinstance(speed, float) and not isinstance(speed, int):\n raise ValueError('Keyword argument `speed` must be an float/integer')\n\n if not kwargs:\n raise ValueError('Missing keyword arguments!')\n else:\n if 'dist' not in kwargs:\n raise ValueError('Missing keyword argument `dist`')\n if 'time' not in kwargs:\n raise ValueError('Missing keyword argument `time`')\n\n dist = kwargs['dist']\n time = kwargs['time']\n\n if not isinstance(dist, str):\n raise ValueError('Keyword argument `dist` must be a string')\n if not isinstance(time, str):\n raise ValueError('Keyword argument `time` must be a string')\n\n if time == 'ms':\n time_multiplier = 60 * 60 * 100\n elif time == 's':\n time_multiplier = 60 * 60\n elif time == 'min' or time == 'm':\n time_multiplier = 60\n elif time == 'hr':\n time_multiplier = 1\n elif time == 'day':\n time_multiplier = 1 / 24\n else:\n raise ValueError('Keyword argument `time` must be either ms|s|m|hr|d')\n\n if dist == 'km':\n distance_multiplier = 1\n elif dist == 'm':\n distance_multiplier = 1000\n elif dist == 'ft':\n distance_multiplier = 3280.8398950131\n elif dist == 'yrd':\n distance_multiplier = 1093.6\n else:\n raise ValueError('Keyword argument `dist` must be either km|m|ft|yrd')\n\n return speed * distance_multiplier / time_multiplier", "def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)", "async def test_speed_read(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ON: 1,\n CharacteristicsTypes.ROTATION_SPEED: 100,\n },\n )\n assert state.attributes[\"percentage\"] == 100\n assert state.attributes[\"percentage_step\"] == 1.0\n\n state = await helper.async_update(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ROTATION_SPEED: 50,\n },\n )\n assert state.attributes[\"percentage\"] == 50\n\n state = await helper.async_update(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ROTATION_SPEED: 25,\n },\n )\n assert state.attributes[\"percentage\"] == 25\n\n state = await helper.async_update(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ON: 0,\n CharacteristicsTypes.ROTATION_SPEED: 0,\n },\n )\n assert state.attributes[\"percentage\"] == 0", "def speed_detect(motion_first, motion_second, distance):\n if (GPIO.input(motion_first) == 1):\n # Determines if first motion sensor is triggered first.\n speed(timing(motion_second), distance)\n # Sends speed as message to server when corresponding sensor triggered.\n ensure_low(motion_first, motion_second)\n # Ensure that both motion sensors are low/are not triggered.\n\n if (GPIO.input(motion_second) == 1):\n # Determines if second motion sensor is triggered first.\n speed(timing(motion_first), distance)\n # Sends speed as message to server when corresponding sensor triggered.\n ensure_low(motion_first, motion_second)\n # Ensure that both motion sensors are low/are not triggered.", "def testX264Speed(self):\n if self.x264Speed in tools.X264_SPEEDS:\n self.assertEqual(\n self.x264Speed,\n 
self.config.x264Speed\n )\n else:\n self.assertNotEqual(\n self.x264Speed,\n self.config.x264Speed\n )\n self.assertEqual(\n tools.X264_SPEED_DEFAULT,\n self.config.x264Speed\n )", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def get_speed(self):\n raise NotImplementedError", "def get_speed(self):\n raise NotImplementedError", "def tonfilter(ton,_speed):\r\n global _ton\r\n global _status\r\n global _status_old\r\n min_ton = 13\r\n\r\n if ton <= min_ton and _speed >= 5 :\r\n _ton = 0\r\n _status = 'EMPTY_TRAVEL'\r\n elif ton <= min_ton and _speed < 5 :\r\n _ton = 0\r\n _status = 'EMPTY_STOP'\r\n elif ton > min_ton and _speed < 5 and _slope == 1 and (_status_old == 'EMPTY_STOP' or _status_old == 'LOADING'):\r\n _ton = ton\r\n _status = 'LOADING'\r\n elif ton >= 140 and _speed > 5 :\r\n _ton = ton\r\n _status = 'LOADED_MOVE'\r\n elif ton >= 140 and _speed <= 5 and _slope == 1 :\r\n _ton = ton\r\n _status = 'LOADED_STOP'\r\n elif ton > min_ton and ton <= 135 and _speed < 5 and _slope == -1 and (_status_old == 'LOADED_STOP' or _status_old == 'LOADED_MOVE' or _status_old == 'DUMPING'):\r\n _ton = ton\r\n _status = 'DUMPING'\r\n else :\r\n _ton = ton\r\n _status_old = _status", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def speed(self):\n speed = self.get_of_features_speed()\n\n if speed is not None:\n return speed\n\n if self._custom_speed is not None:\n return self._custom_speed\n\n if self._is_v0x04() and self.port_number == PortNo04.OFPP_LOCAL:\n return 0\n\n if not self._is_v0x04() and self.port_number == PortNo01.OFPP_LOCAL:\n return 0\n\n # Warn unknown speed\n # Use shorter switch ID with its beginning and end\n if isinstance(self.switch.id, str) and len(self.switch.id) > 20:\n switch_id = self.switch.id[:3] + '...' 
+ self.switch.id[-3:]\n else:\n switch_id = self.switch.id\n LOG.warning(\"Couldn't get port %s speed, sw %s, feats %s\",\n self.port_number, switch_id, self.features)\n\n return None", "def get_speed(self):\n raise NotImplementedError()", "def speed(self) -> str:\n current_wink_speed = self.wink.current_fan_speed()\n if SPEED_AUTO == current_wink_speed:\n return SPEED_AUTO\n if SPEED_LOWEST == current_wink_speed:\n return SPEED_LOWEST\n if SPEED_LOW == current_wink_speed:\n return SPEED_LOW\n if SPEED_MEDIUM == current_wink_speed:\n return SPEED_MEDIUM\n if SPEED_HIGH == current_wink_speed:\n return SPEED_HIGH\n return None", "def convertSpeed(self, v):\n\t\tconverted = v/(self.microstep*9.375)\n\t\treturn converted", "def test_speed_to_states():\n speed_mapping = HomeKitSpeedMapping([\"off\", \"low\", \"high\"])\n assert speed_mapping.speed_to_states(-1) == \"off\"\n assert speed_mapping.speed_to_states(0) == \"off\"\n assert speed_mapping.speed_to_states(33) == \"off\"\n assert speed_mapping.speed_to_states(34) == \"low\"\n assert speed_mapping.speed_to_states(50) == \"low\"\n assert speed_mapping.speed_to_states(66) == \"low\"\n assert speed_mapping.speed_to_states(67) == \"high\"\n assert speed_mapping.speed_to_states(100) == \"high\"" ]
[ "0.6799373", "0.6479763", "0.6479763", "0.6465872", "0.64656264", "0.6375015", "0.6311451", "0.6302171", "0.6270626", "0.6230628", "0.61868745", "0.6089499", "0.6011468", "0.598422", "0.59120023", "0.59076583", "0.5869805", "0.58342975", "0.58227706", "0.5811397", "0.5793797", "0.5789036", "0.5789036", "0.5771143", "0.576776", "0.5738222", "0.5738069", "0.5735555", "0.56880856", "0.5686198" ]
0.7302605
0
Monitor the status of the specified HAT device in a loop until the triggered status is True or the running status is False.
def wait_for_trigger(self):
    # Read the status only to determine when the trigger occurs.
    is_running = True
    is_triggered = False
    while is_running and not is_triggered:
        status = self.hat.a_in_scan_status()
        is_running = status.running
        is_triggered = status.triggered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\r\n while True:\r\n if self.camera_device.is_detecting():\r\n self.alarm_device.switch_alarm()", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def run(self):\n run = True\n while run:\n statuses = self.check_status()\n unhealthy = [ups for ups in statuses.keys() if not statuses[ups]]\n drivers_to_bounce = set()\n for ups in unhealthy:\n driver = self.ups_confs[ups].get('driver', 'usbhid-ups')\n drivers_to_bounce.add(driver)\n if drivers_to_bounce:\n self.bounce_drivers(drivers_to_bounce)\n time.sleep(self.monitor_cycle/1000)", "def wait_for_trigger(hat):\n try: \n # GPIO.setmode(GPIO.BCM)\n GPIO.setup(PWR_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n print('\\n <<<READY>>>\\n\\n (0) Waiting for trigger to initiate recording (or press Ctrl+C to abort)\\n')\n \n # Wait until LoStik is properly inserted\n global LoStikInserted\n while(LoStikInserted):\n try:\n ser = serial.Serial(\"/dev/ttyUSB0\", baudrate=57600)\n manager = (ReaderThread(ser, PrintLines))\n enter = type(manager).__enter__\n exit = type(manager).__exit__\n value = enter(manager)\n hit_except = False\n LoStikInserted = 0\n\t\t\t\t\n except:\n for a in range(10):\n GPIO.output(RECORDING_LED,GPIO.HIGH)\n time.sleep(.1)\n GPIO.output(RECORDING_LED,GPIO.LOW) \n time.sleep(.1)\t\t\t\n print(\" LoStik USB not Properly Inserted!\") \n LoStikInserted = 1\n try:\n protocol = value\n while(CMD_RECEIVED):\n if GPIO.input(PWR_PIN) == 1 or CMD_SHUTDOWN:\n print(\" Shutting Down\")\n GPIO.cleanup()\n time.sleep(1)\n hat.a_in_scan_cleanup()\n time.sleep(1)\n quit()\n pass\n except:\n hit_except = True\n if not exit(manager, *sys.exc_info()):\n raise\n finally:\n if not hit_except:\n exit(manager, None, None, None)\n\n except KeyboardInterrupt:\n\t print(CURSOR_BACK_2, ERASE_TO_END_OF_LINE, '\\n')\n\t hat.a_in_scan_cleanup()\n\t GPIO.cleanup()\n\t quit()\n\n # Sends trigger pin on RPi to HIGH which should be connected to MCC118 trigger input pin\n GPIO.setup(TRIGGER_PIN, GPIO.OUT)\t\n GPIO.output(TRIGGER_PIN,GPIO.HIGH)\n \n # Read the status only to determine when the trigger occurs.\n is_running = True\n is_triggered = False\n while is_running and not is_triggered:\n status = hat.a_in_scan_status()\n is_running = status.running\n is_triggered = status.triggered\n if not is_triggered:\n time.sleep(0.001)\n GPIO.cleanup()", "async def status_update_loop(self):\n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n \n while self.state == CHANNEL_MOVE_STATE_NONE:\n set_value = await self.status_message_update_waiter\n # sleep sets by `None`\n if set_value is not None:\n break\n \n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n await self.update_status_message()\n continue\n \n await self.update_status_message()\n await self.send_done_notification()\n return", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def wait_for_instance_status(config, status):\n client = config.create_api_client()\n InstanceId = config.get('InstanceId')\n while True:\n time.sleep(20)\n req = DescribeInstancesRequest.DescribeInstancesRequest()\n result = do_action(client, req)\n items = result[\"Instances\"][\"Instance\"]\n lookups = {item['InstanceId']: item for item in items}\n if lookups[InstanceId]['Status'] == status:\n return\n else:\n click.echo(\"Instance's current status: {}; transfer to status {} ...\".format(\n lookups[InstanceId]['Status'], status\n 
))", "async def loop_presence(self):\n # TODO: Does this even work?\n presence = await self.set_presence()\n logger.debug(f'{presence[\"activity\"][1]} {presence[\"status\"][1]}')", "def wait(self):\n while not self.done:\n self.device._handle_events(1000)", "def run():\n # 1 sec delay to allow DHT22 sensor to start as per datasheet\n sleep_ms(1000)\n last_run = ticks_ms()\n _read()\n\n while True:\n if ticks_diff(ticks_ms(), last_run) > _READING_DELAY_MS:\n last_run = ticks_ms()\n _read()\n\n _signal_alive()\n sleep_ms(1000)", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def wait(self, **kwargs):\n\n\t\tif not callable(self.on):\n\t\t\traise UserWarning('Your device does not support the self.on function, try without wait')\n\n\t\tif not callable(self.histogram):\n\t\t\traise UserWarning('Your device does not support the self.histogram function, try without wait')\n\n\t\tself.on()\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tif self.histogram() is None:\n\t\t\t\t\traise UserWarning('Could not load histogram, perhaps the device is not yet connected')\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\t\texcept UserWarning as e:\n\t\t\t\ttime.sleep(kwargs.get('check', 200) / 1000.)\n\n\t\treturn self", "async def _watch_status(self, job_id, job_paths):\n status_path = job_paths['status.json']\n\n watcher = aionotify.Watcher()\n watcher.watch(status_path, aionotify.Flags.CLOSE_WRITE)\n await watcher.setup(self.loop)\n try:\n while True:\n try:\n await self._read_status(job_id, job_paths)\n await watcher.get_event()\n self.logger.debug(f'Detected status change for job {job_id}')\n except concurrent.futures.CancelledError:\n # Break loop (likely normal exit through task cancellation)\n break\n except Exception: # pylint: disable=broad-except\n self.logger.exception(f'Exception while watching status of job {job_id}')\n finally:\n watcher.unwatch(status_path)\n watcher.close()", "def monitor(self):\n while True:\n complete = True\n for thread in self._running:\n if not thread.complete:\n complete = False\n\n if thread.complete:\n thread.join()\n elif thread.failed:\n pass\n\n if complete:\n break\n time.sleep(Threadable.THREAD_SLEEP)", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n 
new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def wait_for_object_status(self, object_name, object_id, status,\n timeout=120, interval=3):\n cmd = self.object_cmd(object_name, 'show')\n start_time = time.time()\n while time.time() - start_time < timeout:\n if status in self.cinder(cmd, params=object_id):\n break\n time.sleep(interval)\n else:\n self.fail(\"%s %s did not reach status %s after %d seconds.\"\n % (object_name, object_id, status, timeout))", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']", "def run(self):\n while True:\n self.current_wifi_clients()\n self._eval_is_someone_home()\n time.sleep(self._interval)", "def catExists():\n currentTime = datetime.now().strftime('%H:%M:%S')\n while True:\n #wlbt.Trigger()\n target = wlbt.GetSensorTargets()\n if target:\n breathing = isBreathing()\n if breathing == 1:\n #print(\"the cat is alive!\")\n catStatus = 2\n else:\n #print(\"the cat is dead!\")\n catStatus = 1\n else:\n #print(\"There's no cat in this box\")\n catStatus = 0\n return catStatus", "def heat_on():\n global PAUSED\n print(\"Temp is low; toggling heat on\")\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n GPIO.output(HEATPIN, RELAYON)\n while (all_temps_avg < TEMPMID or max_temp < TEMPLOW) and (PAUSED == False):\n time.sleep(10)", "def loop(self):\r\n self._initialize()\r\n if self._waitEvConnected(None):\r\n return self._handleEvConnected()\r\n else:\r\n return False", "def watch(self):\n self.bot.set_all_initial()\n if \"monitor_iterations\" in self.auto_settings:\n monitor_iterations = self.auto_settings[\"monitor_iterations\"]\n else:\n monitor_iterations = 10\n\n monitor_results = self.motion_sensors.detect_motion(read_times=monitor_iterations)\n if monitor_results is None:\n return\n else:\n self.communications.set_status(\"Motion Detected, PANIC!\")\n self.communications.add_event(\"Motion Triggered on {}\".format(monitor_results.items()))\n self.communications.send_notification(\"Motion Detected\", monitor_results.keys())", "def update_status(self):\n if self.pwm:\n if self.state == GPIO.HIGH:\n thread = 
threading.Thread(target=self._pwm_on, args=())\n thread.start()\n elif self.state == GPIO.LOW:\n thread = threading.Thread(target=self._pwm_off, args=())\n thread.start()\n else:\n GPIO.output(self.id_, self.state)\n\n return self.get_status()", "def waitStatus(j, wtype='Load'):\n timeout = 1\n curIter = 0\n maxIter = 60\n done = False\n while not done:\n stat = j.GetStatus(wtype)\n if stat == \"complete\":\n done = True\n else:\n curIter = curIter + 1\n if curIter > maxIter:\n raise ValueError(\"timeout waiting\")\n time.sleep(timeout)", "def check_alert(self, event):\n \n # Get board logger\n board_logger = self.get_board_logger()\n\n # Loop for an hour and continue to alert every ten minutes \n current_time = datetime.now()\n end_time = current_time + timedelta(0, 60)\n # end_time = current_time + timedelta(hours=1)\n\n alarm_counter = 0\n while current_time < end_time:\n # Sleep for 10 minutes\n sleep(10);\n #sleep(600);\n\n # Prevent race condition between Board input_status and check_alert \n if GPIO.input(self.__pin) == 1:\n\n # Log alarm cycle\n alarm_counter += 1\n board_logger.info(\"Alarm Cycle #%s: Initiating event \" \n + \"alert.\", str(alarm_counter))\n\n # Call Event object's alert method\n event.alert(self.__ip, board_logger)\n\n # Get current time\n current_time = datetime.now()\n \n else:\n # Input status is 0 indicating recovery; Break out of loop and \n # return to main thread \n board_logger.info(\"Alarm state recovery.\") \n break\n \n # End of alert cycle; Return to main thread\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n board_logger.info(\"End check alarm cycle. Current pin input \"\n + \"status is %s.\", status)", "def run(self):\n while self.running:\n # The minimum time required by the DHT22\n # is 2 seconds between reads.\n time.sleep(2.0)\n\n try:\n self._current_temperature_reading = self._sensor.temperature\n self._current_humidity_reading = self._sensor.humidity\n\n except RuntimeError:\n self._current_temperature_reading = None\n self._current_humidity_reading = None\n\n if (self._current_temperature_reading is not None and\n self._current_humidity_reading is not None):\n self.temperature_c = self._current_temperature_reading\n self.humidity = self._current_humidity_reading", "def monitor(self):\n\n # Log beginning of process\n board_logger = self.get_board_logger()\n board_logger.info(\"Beginning monitor of input for pin %s.\", \\\n self.__pin)\n \n # Set input status of pin for board object\n self.set_input_status(GPIO.input(self.__pin))\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n board_logger.info(\"Initital status: %s\", status)\n\n # Deal with an error status upon power failure\n if self.get_input_status() == 1: self.initiate_event()\n\n # Monitor pin until KeyBoardInterrupt is detected\n while True:\n\n # Log monitoring\n board_logger.info(\"Monitoring for pin changes...\")\n \n # Wait for a change in pin status\n GPIO.wait_for_edge(self.__pin, GPIO.BOTH)\n\n sleep(0.005) #debounce for 5ms\n\n if self.get_input_status() != GPIO.input(self.__pin):\n \n # Set input status of pin\n self.set_input_status(GPIO.input(self.__pin))\n\n # Initiate event\n self.initiate_event()", "def fan_on():\n global PAUSED\n print(\"Temps vary too much; toggling fan on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYON)\n while (loc_temp_diff > TEMPDIFF) and (PAUSED == False):\n time.sleep(10)\n if min_temp < TEMPLOW or max_temp > TEMPHIGH:\n break", "async def 
async_update(self) -> None:\n try:\n status = await self._device.command(\"status_102_0\")\n except pyaehw4a1.exceptions.ConnectionError as library_error:\n _LOGGER.warning(\n \"Unexpected error of %s: %s\", self._unique_id, library_error\n )\n self._attr_available = False\n return\n\n self._attr_available = True\n\n self._on = status[\"run_status\"]\n\n if status[\"temperature_Fahrenheit\"] == \"0\":\n self._attr_temperature_unit = UnitOfTemperature.CELSIUS\n else:\n self._attr_temperature_unit = UnitOfTemperature.FAHRENHEIT\n\n self._current_temperature = int(status[\"indoor_temperature_status\"], 2)\n\n if self._on == \"1\":\n device_mode = status[\"mode_status\"]\n self._attr_hvac_mode = AC_TO_HA_STATE[device_mode]\n\n fan_mode = status[\"wind_status\"]\n self._fan_mode = AC_TO_HA_FAN_MODES[fan_mode]\n\n swing_mode = f'{status[\"up_down\"]}{status[\"left_right\"]}'\n self._swing_mode = AC_TO_HA_SWING[swing_mode]\n\n if self._attr_hvac_mode in (HVACMode.COOL, HVACMode.HEAT):\n self._target_temperature = int(status[\"indoor_temperature_setting\"], 2)\n else:\n self._target_temperature = None\n\n if status[\"efficient\"] == \"1\":\n self._preset_mode = PRESET_BOOST\n elif status[\"low_electricity\"] == \"1\":\n self._preset_mode = PRESET_ECO\n elif status[\"sleep_status\"] == \"0000001\":\n self._preset_mode = PRESET_SLEEP\n elif status[\"sleep_status\"] == \"0000010\":\n self._preset_mode = \"sleep_2\"\n elif status[\"sleep_status\"] == \"0000011\":\n self._preset_mode = \"sleep_3\"\n elif status[\"sleep_status\"] == \"0000100\":\n self._preset_mode = \"sleep_4\"\n else:\n self._preset_mode = PRESET_NONE\n else:\n self._attr_hvac_mode = HVACMode.OFF\n self._fan_mode = None\n self._swing_mode = None\n self._target_temperature = None\n self._preset_mode = None" ]
[ "0.62453014", "0.6092352", "0.6056146", "0.6053579", "0.5992721", "0.5983913", "0.59502167", "0.5920007", "0.58608514", "0.5857563", "0.58304703", "0.5823211", "0.5805296", "0.57848245", "0.57581127", "0.5753424", "0.5740827", "0.57273775", "0.57046634", "0.56977254", "0.56729674", "0.5663147", "0.5658957", "0.5657926", "0.5654823", "0.5650362", "0.5637034", "0.562944", "0.5628877", "0.56253237" ]
0.70297736
0
Renders a calendar with models from the chosen month
def calendar(request, year=None, month=None):
    today = datetime.date.today()
    year = int(year) if year else today.year
    month = int(month) if month else today.month
    try:
        first_of_month = datetime.date(year, month, 1)
    except ValueError:
        # Not a valid year and month
        raise Http404
    events = Event.objects.filter(event_start__year=year, event_start__month=month)
    cal = EventCalendar(events, year, month).formatmonth(year, month)
    user = request.user
    future_attending_events = attending_events(user, today)
    months = year * 12 + month - 1  # months since epoch (Christ)
    month_list = [
        datetime.date(m // 12, m % 12 + 1, 1) for m in range(months - 5, months + 7)
    ]
    # Get some random dates in the current, next, and previous month.
    # These dates are used load the calendar for that month.
    # * prev is some day in the previous month
    # * this is some day in this month
    # * next is some day in the next month
    context = {
        "calendar": mark_safe(cal),
        "prev": first_of_month - datetime.timedelta(27),
        "this": first_of_month,
        "next": first_of_month + datetime.timedelta(32),
        "future_attending_events": future_attending_events,
        "month_list": month_list,
    }
    return render(request, "events/event_list.html", context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_cal(request, year=None, month=None):\n if year == None:\n # get the current comic as a starting point\n lToday = Comic.objects.filter(published=True).order_by('-date')[0].date\n year = lToday.year\n month = lToday.month\n\n return calendar(request, year, month)", "def calendar(request, pYear, pMonth):\n lYear = int(pYear)\n lMonth = int(pMonth)\n lCalendarFromMonth = datetime.date(lYear, lMonth, 1)\n lCalendarToMonth = datetime.date(lYear, lMonth, monthrange(lYear, lMonth)[1])\n lComics = Comic.objects.filter(published=True, date__gte=lCalendarFromMonth, date__lte=lCalendarToMonth).order_by('date')\n lCalendar = ArchiveCalendar(lComics).formatmonth(lYear, lMonth)\n lPreviousYear = lYear\n lPreviousMonth = lMonth - 1\n if lPreviousMonth == 0:\n lPreviousMonth = 12\n lPreviousYear = lYear - 1\n lNextYear = lYear\n lNextMonth = lMonth + 1\n if lNextMonth == 13:\n lNextMonth = 1\n lNextYear = lYear + 1\n pmn = named_month(lPreviousMonth)\n nmn = named_month(lNextMonth)\n \n # now for something fun:\n # if we have the first or last comics in a collection, we DON'T want to paginate this!\n fComic = lComics[0]\n lComic = lComics.reverse()[0]\n aComic = fComic.get_first()\n bComic = fComic.get_latest()\n \n \n if aComic is None or fComic.id == aComic.id:\n lPreviousYear = 0\n lPreviousMonth = 0\n if bComic is None or lComic.id == bComic.id:\n lNextYear = 0\n lNextMonth = 0\n \n\n return render(request, 'archive/archive_cal.html', {'Calendar' : mark_safe(lCalendar),\n 'Month' : str(lMonth),\n 'MonthName' : named_month(lMonth),\n 'Year' : str(lYear),\n 'PreviousMonth' : str(lPreviousMonth),\n 'PreviousMonthName' : pmn,\n 'PreviousYear' : str(lPreviousYear),\n 'NextMonth' : str(lNextMonth),\n 'NextMonthName' : nmn,\n 'NextYear' : str(lNextYear),\n })", "def events_in_month(request, year, month):\n month = datetime(year=year, month=month, day=1)\n next_month = month + timedelta(months=1)\n month_events = Event.objects.filter(date__gte=month, date__lte=next_month).order_by('date')\n return render_short(request, 'adhoc_calendar/events.html', context)", "def main(request, year=None):\n\tif year: year = int(year)\n\telse: year = time.localtime()[0]\n\n\tnowYear, nowMonth = time.localtime()[:2]\n\tlst = []\n\n\tfor y in [year, year+1, year+2]:\n\t\tmonthLst = []\n\t\tfor n, month in enumerate(MONTH_NAMES):\n\t\t\tentry\t= current = False\n\t\t\tentries\t= entry.objects.filter(date__year=y, date__month=n+1)\n\n\t\t\tif entries:\n\t\t\t\tentry = True\n\t\t\tif y == nowYear and n+1 == nowMonth:\n\t\t\t\tcurrent = True\n\t\t\tmonthLst.append(dict(n=n+1, name=month, entry=entry, current=current))\n\t\tlst.append((y, monthLst))\n\n\treturn render_to_response(\"cal/\", dict(years=lst, user=request.user, year=year, reminders=reminders(request)))", "def create_month_scr(self, month, toogle_today=False):\n\n scr = Screen()\n m = self.month_names_eng[self.active_date[1] - 1]\n scr.name = \"%s-%s\" % (m, self.active_date[2]) # like march-2015\n\n # Grid for days\n grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={\"top\": 1})\n scr.add_widget(grid_layout)\n\n # Days abbrs\n for i in range(7):\n if i >= 5: # weekends\n l = Label(text=self.days_abrs[i], color=(1, 0, 0, 1))\n else: # work days\n l = Label(text=self.days_abrs[i], text_size=(self.size[0], None), halign=\"center\")\n\n grid_layout.add_widget(l)\n\n global holiday, halfday\n\n # Buttons with days numbers\n for week in month:\n for day in week:\n if day[1] >= 6: # weekends\n self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 
0, 1))\n else:\n self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))\n for i in range(len(holiday)):\n if self.active_date[2] == holiday[i][2]:\n if self.active_date[1] == holiday[i][1]:\n if day[0] == holiday[i][0]:\n self.tbtn.background_color=(128, 0, 128, 1)\n for i in range(len(halfday)):\n if self.active_date[2] == halfday[i][2]:\n if self.active_date[1] == halfday[i][1]:\n if day[0] == halfday[i][0]:\n self.tbtn.background_color=(0, 255, 255, 0.5)\n\n self.tbtn.bind(on_press=self.get_btn_value)\n\n if toogle_today:\n # Down today button\n if day[0] == self.active_date[0] and day[2] == 1:\n self.tbtn.state = \"down\"\n # Disable buttons with days from other months\n if day[2] == 0:\n self.tbtn.text = \" \"\n self.tbtn.disabled = True\n self.tbtn.background_color = (0, 0, 0, 0.1)\n\n grid_layout.add_widget(self.tbtn)\n\n self.sm.add_widget(scr)", "def show_month(year, month, page):\n from calendar import month_name\n if month not in range(1, 13): abort(404)\n per_page = current_app.config['POSTS_PER_PAGE']\n posts = Post.query.filter(db.extract('year', Post.datetime)==year)\n posts = posts.filter(db.extract('month', Post.datetime)==month)\n posts = posts.order_by(Post.id.desc()) \n if not session.get('logged_in'): posts = posts.filter_by(visible=True)\n items = posts.limit(per_page).offset((page - 1) * per_page).all()\n pagination = Pagination(posts, page=page, per_page=per_page, \n total=posts.count(), items=items)\n if items:\n flash(\"Posts from %s %d\" % (month_name[month], year))\n else:\n flash(\"No entries here so far\")\n return render_template('posts.html', pagination=pagination,\n endpoint_func=lambda x: url_for('main.show_month', year=year, month=month, \n page=x))", "def view_month(request, year, month):\n year, month = int(year), int(month)\n transactions = Transaction.month(\n year,\n month,\n user=request.user,\n account__include_in_balance=True,\n )\n start = make_aware(datetime(year, month, 1))\n balance = Transaction.objects.filter(\n user=request.user,\n account__include_in_balance=True,\n date__lt=start,\n ).sum()\n return render(request, 'ledger/pages/view_month.html', {\n 'title': start.strftime('%B %Y'),\n 'transactions': transactions,\n 'balance': balance,\n 'prev_month': adjust_month(year, month, -1),\n 'next_month': adjust_month(year, month, 1),\n })", "def month_archive(request, year, month):\n articles = Article.objects.filter(pub_date__year=year, pub_date__month=month)\n context = { 'year': year, 'month': month, 'articles': articles }\n pprint_local_vars(locals())\n return render(request, 'news/month_archive.html', context)", "def calender(self, month, year):\n\n day = ['S', ' M', ' T', ' W', ' Th', 'F', ' S']\n\n days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n values = 1\n d = 1\n\n m = month\n y = year\n y0 = y - (14 - m) // 12\n x = y0 + y0 // 4 - y0 // 100 + y0 // 400\n m0 = m + 12 * ((14 - m) // 12) - 2\n d0 = (d + x + 31 * m0 // 12) % 7\n\n if utility_obj.isleap_year(str(year)):\n days[1] = 29\n row = 6\n column = 7\n two_d_array = [[0 for j in range(column)] for i in range(row)]\n\n print('Your Calender is Ready\\n')\n\n for i in range(0, 6 + 1):\n print(day[i], end=' ')\n print()\n for i in range(row):\n\n for j in range(column):\n\n if values <= days[m - 1]:\n if i == 0 and j < d0:\n two_d_array[i][j] = ' '\n continue\n\n two_d_array[i][j] = values\n values += 1\n\n for i in range(row):\n\n for j in range(column):\n if two_d_array[i][j] != 0:\n x = two_d_array[i][j]\n x1 = str(x).ljust(2)\n print(x1, end=\" \")\n\n print()", 
"def month_content():\n \n user_id = current_identity.id\n month = request.form.get(\"month\")\n year = request.form.get(\"year\")\n \n if not is_month(month, year, user_id):\n establish_month(month, year, user_id)\n\n dayContentDict = format_day_content(month, year, user_id)\n\n d = collections.defaultdict(dict)\n response = {\n \"status\": \"ok\",\n \"dayContent\" : d\n }\n \n if dayContentDict:\n response[\"dayContent\"] = dayContentDict\n \n return jsonify(response)", "def news_for_month(self):\n\n raise NotImplementedError", "def display_calendar(daze, month, year):\n log = daze.dateDict\n if not year:\n year = date.today().year # defaults to this year\n if month:\n first = date(year, month, 1)\n last = max([day for day in cal.itermonthdates(year, month) if day.month == month])\n s, ndates, firstdate, lastdate = daze.summarize(firstdate=first, lastdate=last)\n else:\n s, ndates, firstdate, lastdate = daze.summarize()\n places = sorted(s, key=s.get, reverse=True)\n colors = ['green', 'magenta', 'white', 'cyan', 'blue', 'red', 'yellow']\n months = calendar.month_name[1:]\n dates = [firstdate + timedelta(days=i) for i in range((lastdate - firstdate).days + 1)]\n\n matches = {p: c for (p, c) in zip(places, colors)}\n\n for (p, c) in matches.items():\n click.secho(\" %s \" % p, bg=c, fg='black', bold=True)\n\n for _date in dates:\n if _date.day == 1 or _date == firstdate:\n click.echo('')\n click.echo(\"\\n\" + months[_date.month - 1])\n if (_date.isoweekday() != 7):\n click.echo(\" \" * 3 * _date.isoweekday(), nl=False)\n if _date in log:\n p = log[_date]\n click.secho(\"%s\" % str(_date.day).rjust(3),\n fg='black',\n bg=matches[p],\n nl=(_date.isoweekday() == 6))\n else:\n click.secho(\"%s\" % str(_date.day).rjust(3),\n fg='black', nl=(_date.isoweekday() == 6))\n\n click.echo('\\n\\n\\n')", "def formatmonth(self, theyear, themonth, withyear=True):\n\n schedules = Calendar_Group.objects.filter(day__month=themonth)\n\n v = []\n a = v.append\n a('<div class=\"table-responsive\"><table class=\"table table-bordered\" cellpadding=\"0\" cellspacing=\"0\" class=\"month\">')\n a('\\n')\n a(self.formatmonthname(theyear, themonth, withyear=withyear))\n a('\\n')\n a(self.formatweekheader())\n a('\\n')\n for week in self.monthdays2calendar(theyear, themonth):\n a(self.formatweek(week, schedules))\n a('\\n')\n a('</table></div>')\n a('\\n')\n return ''.join(v)", "def index(http_request, year=datetime.datetime.now().strftime(\"%Y\"), month=datetime.datetime.now().strftime(\"%m\")):\n\t# make sure the year number and month number are ints\n\tyear = int(year)\n\tmonth = int(month)\n\ttimestamp = datetime.datetime(year, month, 1)\n\t\n\t#initialize container for dates to be stored\n\tdate_list = []\n\t\n\tevents = Event.objects.filter(edate__year=year).filter(edate__month=month)\n\tfor event in events:\n\t\tdate_list.append({'id':event.id, 'day':datetime.date(event.edate.year, event.edate.month, event.edate.day), 'title':event.title, 'class':'event'})\n\n\tprojects = Project.objects.filter(due__year=year).filter(due__month=month)\n\tfor project in projects:\n\t\tdate_list.append({'id':project.id, 'day':datetime.date(project.due.year, project.due.month, project.due.day), 'title':project.name, 'class':'projects'})\n\t\t\t\n\t# next month's timestamp\n\tif month == 12:\n\t\tnext_month = datetime.datetime(year+1, 1, 1)\n\telif month < 12:\n\t\tnext_month = datetime.datetime(year, month+1, 1)\n\t\n\tupcoming_projects = 
Project.objects.filter(due__year=next_month.year).filter(due__month=next_month.month)\n\t\n\t\n\treturn render_to_response('schedule_cal.html', \n\t\t\t\t {'date_list':date_list, \n\t\t\t\t 'date':timestamp, \n 'urlprefix': urlprefix (),\n\t\t\t\t 'upcoming_projects':upcoming_projects}, \n\t\t\t\t )", "def build_month_graphs(months):\n for month in months:\n month.find_grid_lines()\n\n # Insert left edge of the graph\n month.vert_insert_line(0, distance=-60)\n\n # Insert right edge of the graph\n right_edge = ([month.width, 0], [month.width, month.height])\n month.vert_add_line(right_edge[0], right_edge[1])\n\n month.get_cells()\n month.get_col_labels()", "def month():\n \n # get month entered by user - if no month entered default to current month\n month = request.args.get(\"month\", datetime.now().strftime(\"%Y-%m\"))\n \n # get budget data for month as a dictionary\n data = budget_data(month)\n \n return json.dumps(data)", "def formatmonth(self, theyear, themonth, withyear=True):\n\n events = Event.objects.filter(day__month=themonth)\n\n v = []\n a = v.append\n a('<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\" class=\"month\">')\n a('\\n')\n a(self.formatmonthname(theyear, themonth, withyear=withyear))\n a('\\n')\n a(self.formatweekheader())\n a('\\n')\n for week in self.monthdays2calendar(theyear, themonth):\n a(self.formatweek(week, events))\n a('\\n')\n a('</table>')\n a('\\n')\n return ''.join(v)", "def set_month(self, month):\r\n\t\tmonths = ['Enero', 'Febrero', 'Marzo', 'Abril',\r\n\t\t\t\t 'Mayo', 'Junio', 'Julio', 'Agosto'\r\n\t\t\t\t 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\r\n\t\tfor i in range(12):\r\n\t\t\tif month == i: \r\n\t\t\t\treturn months[i-1]", "def formatDay(self, themonth, date, num_weeks):\n if date.month == themonth:\n day_class = 'day'\n else:\n day_class = 'noday' # day outside month\n\n html = '<td class=\"%s' % day_class\n\n # if this is today then highlight it\n if date == self.today:\n html += ' today'\n today_text = 'Today '\n else:\n today_text = ''\n\n # if this is the selected date then tag it\n if date == self.selected_date or (self.selected_record\n and date == self.selected_record.start_date):\n html += ' selected'\n # if a filter range is set then tag it\n elif (self.filter_start_date and self.filter_finish_date\n and self.filter_start_date <= date\n and date <= self.filter_finish_date):\n html += ' filtered'\n\n html += ('\" style=\"height: %f%%\"><div class=\"%s_header\">'\n '<a class=\"block\" '\n 'href=\"?year=%d&month=%d&day=%d&clear_recording_id=1\">'\n '%s%d</a></div>' % (90.0 / num_weeks, day_class,\n date.year, date.month, date.day, today_text, date.day))\n\n if self._storage:\n for recording in self._storage.getRecordings(date,\n station=self.filter_station):\n extra_div_class = \"\"\n if (self.selected_record\n and recording.id == self.selected_record.id):\n extra_div_class += \" selected_entry\"\n if ((self.filter_title and self.filter_title\n != recording.title)\n or (self.filter_start_date and self.filter_start_date\n > recording.finish_time.date())\n or (self.filter_finish_date and self.filter_finish_date\n < recording.start_time.date())):\n extra_div_class += \" filtered_out\"\n html += ('<div class=\"day_entry%s\"><a class=\"block\" '\n 'href=\"?year=%d&month=%d&recording_id=%d'\n '&set_recording_id=1\">\\n'\n '<span class=\"recording_time\">%s</span>\\n'\n '<span class=\"recording_station\">%s</span>\\n'\n '<span class=\"recording_title\">%s</span>\\n'\n '</a></div>\\n' % (extra_div_class, date.year,\n 
date.month, recording.id,\n formatTimeUI(recording.start_time, compact=True),\n formatStationName(recording.station, compact=True),\n recording.title))\n\n return html + '</td>'", "def month(m=0):\n if not 1 <= m <= 12:\n # throw error\n return jsonify({}), status.HTTP_400_BAD_REQUEST\n holidays = Holidays.query.filter_by(month=m).all()\n\n this_month = {}\n for h in holidays:\n this_month[h.day] = this_month.get(h.day, []) + [h.holiday]\n\n return jsonify({\"month\": m, \"holidays\": this_month})", "def print_calendar(month, year):\n print MONTH_NAME[month - 1] + ', ' + str(year)\n\n calendar = calculate_date(month, year)\n for i in DAY_NAME:\n print(i),\n\n print\n\n for i in range(len(calendar)):\n if calendar[i] == 0:\n print(align_day_block(0)),\n else:\n print(align_day_block(calendar[i])),\n\n if i % 7 == 0:\n print", "def get_month():\n return handle_invalid_inputs(question_3, months)", "def get_date_display(self, context):\n return '{year}/{month}'.format(year=self.get_year(),\n month=self.get_month().zfill(2))", "def calendar_month(year, month):\n start = datetime.datetime(year, month, 1)\n if month == 12:\n end = datetime.datetime(year+1, 1, 1)\n else:\n end = datetime.datetime(year, month+1, 1)\n print(start)\n print(end)\n return start, end", "def contract_data_chart(request):\n current_date = datetime.today()\n months = [i for i in range(1, 13)]\n data = {\n 'series': [],\n 'labels': settings.CHART_MONTHS_LABELS,\n }\n\n contract_count = []\n for month in months:\n contract_count.append(\n Contract.objects.filter(\n start_date__month=month,\n start_date__year=current_date.year,\n ).values('id').count()\n )\n data['series'].append({\n \"name\": _(\"Contract\"),\n \"data\": contract_count,\n })\n\n return JsonResponse(data)", "def display_calendar_redo(daze, year, month):\n log = daze.dateDict\n\n # Set first and last dates\n if year is None:\n year = date.today().year\n if month is None:\n first = date(year, 1, 1)\n if year == date.today().year:\n last = date.today()\n else:\n last = date(year, 12, 31)\n else:\n first = date(year, month, 1)\n last = date(2016, month, calendar.monthrange(2016, month)[1])\n\n # Get summarized data\n s, ndates, firstdate, lastdate = daze.summarize()\n places = sorted(s, key=s.get, reverse=True)\n colors = ['green', 'magenta', 'white', 'cyan', 'blue', 'red', 'yellow']", "def monthly_schedule(self,month):\n response = requests.get(f'http://company.com/{self.lname}/{month}')\n if response.ok:\n return response.text\n else:\n return 'Bad Response!'", "def month(m):\n\t\tx = db.cquery(\"month\",m)\n\t\tprint \"Total:\", x[0] #@BUG when zero sometimes displays \"1\"\n\t\tf = raw_input(\"[L]ist [N]ew overview or[B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tui.mont1(m)\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(),i[0], i[1],\" \",ui.statstimein(), i[2], ui.statstimeout(),i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\": home_stats()\n\t\telif f == \"b\": home()\n\t\telse:\n\t\t\traw_input(\"I didnt get that... 
Press [Enter] to go back to stats...\")\n\t\t\thome_stats()", "def select_calendar_month(X, year_month, timename='time'):\n\n def calendar_month(year, month):\n \"\"\"\n For a given year and month return the date of the begining of the month and the date of the beginning of the next month\n \"\"\"\n start = datetime.datetime(year, month, 1)\n if month == 12:\n end = datetime.datetime(year+1, 1, 1)\n else:\n end = datetime.datetime(year, month+1, 1)\n print(start)\n print(end)\n return start, end\n \n year, month = year_month\n \n start, end = calendar_month(year, month)\n \n # Gotta do better than this\n if timename.lower() == 'time':\n X_cm = X.sel(time = slice(start, end))\n elif timename.lower() == 'time_wave':\n X_cm = X.sel(time_wave = slice(start, end))\n # Gotta do better than this\n \n return X_cm, [start, end]", "def changeDisplayedMonth(self):\n #ho bisogno di sapere qual è il mese mostrato\n currentMonth = self.indexMonth\n currentYear = self.currentYear\n\n sender = self.sender().objectName()\n if sender == 'bot_next':\n # if currentMonth < 11:\n if self.indexMonth < 11:\n self.indexMonth += 1\n self.setBaseDate(self.baseDate.addMonths(1))\n else:\n self.indexMonth = 0\n self.setCurrentYear(currentYear+1)\n # print('baseDate before', self.baseDate)\n self.setBaseDate(self.baseDate.addMonths(1))\n # print('baseDate after', self.baseDate)\n # print('new Year: ', self.currentYear)\n\n elif sender == 'bot_prev':\n # if currentMonth > 0:\n if self.indexMonth > 0:\n self.indexMonth -= 1\n self.setBaseDate(self.baseDate.addMonths(-1))\n else:\n self.indexMonth = 11\n self.setCurrentYear(currentYear-1)\n self.setBaseDate(self.baseDate.addMonths(-1))\n # print('new Year: ', self.currentYear)\n if currentMonth != self.indexMonth:\n # print(f'currentPageChanged.emit({self.indexMonth})')\n self.currentPageChanged.emit(self.indexMonth)\n self.combo_mesi.setCurrentIndex(self.indexMonth)\n if currentYear != self.currentYear:\n # print('current year changed')\n self.setListaGiorniDellAnno(self.createDates(self.baseDate), self.indexMonth)" ]
[ "0.7238604", "0.7071681", "0.705804", "0.6771286", "0.6719591", "0.66115034", "0.65900797", "0.63997763", "0.6175017", "0.60905683", "0.6089079", "0.6088855", "0.60475314", "0.60411173", "0.59932786", "0.59637064", "0.5923851", "0.5848303", "0.58288735", "0.5822559", "0.5819274", "0.5797147", "0.57908463", "0.5790276", "0.57812107", "0.5772006", "0.57542235", "0.5738204", "0.5726188", "0.5717778" ]
0.7240797
0
Returns a given event or bedpres as an iCal .ics file
def ical_event(request, event_id): event = Event.objects.get(id=event_id) # Use the same template for both Event and BedPres. template = loader.get_template("events/event_icalendar.ics") context = { "event_list": (event,), } response = HttpResponse(template.render(context), content_type="text/calendar") response["Content-Disposition"] = "attachment; filename=Nabla_%s.ics" % event.slug return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_ics(self, request, pk, *args, **kwargs):\n\n event = Event.objects.get(pk=pk)\n ics_file = event.export_event()\n response = Response(ics_file)\n response['Content-Disposition'] = 'attachment; ' \\\n 'filename=' + event.label + '.ics'\n return response", "def generate_ics(events, config):\n\n # Create the Calendar\n calendar = icalendar.Calendar()\n calendar.add('prodid', config.calendar_prodid)\n calendar.add('version', '2.0')\n calendar.add('method', 'publish')\n\n for event_data in events:\n # Create the event\n event = icalendar.Event()\n\n # Populate the event\n event.add('summary', event_data['title'])\n event.add('description', get_description(event_data))\n event.add('uid', event_data['id'])\n event.add('location', event_data['place'])\n event.add('dtstart', get_datetime(event_data, 'when_start'))\n if event_data['when_end']:\n event.add('dtend', get_datetime(event_data, 'when_end'))\n event.add('dtstamp', datetime.datetime.now())\n\n # Add the event to the calendar\n calendar.add_component(event)\n\n return calendar.to_ical()", "def ical_string(self) -> str:\n tz = ''\n if self.timezone != '':\n tz = ';TZID=' + self.timezone\n result = ['BEGIN:VCALENDAR',\n 'BEGIN:VEVENT',\n 'CREATED:' + self._created,\n 'DESCRIPTION:' + self.description,\n 'DTEND' + tz + ':' + self._end,\n 'DTSTAMP' + tz + ':' + self._dtstamp,\n 'DTSTART' + tz + ':' + self._start,\n 'LAST-MODIFIED:' + self.lastmodified,\n 'LOCATION:' + self.location,\n 'SEQUENCE:' + str(self._sequence),\n 'SUMMARY:' + self.summary,\n 'UID:' + self._uid,\n 'END:VEVENT',\n 'END:VCALENDAR']\n return '\\n'.join(result)", "def export_event(self):\n\n cal = Eve()\n cal.add('summary', str(self.categories))\n cal.add('description', self.label)\n cal.add('dtstart', vDatetime(self.start))\n cal.add('dtend', vDatetime(self.end))\n return cal.to_ical()", "def generate_ics(days: Sequence[dict], filename: Text) -> None:\n cal = Calendar()\n cal.add(\"X-WR-CALNAME\", \"中国法定节假日\")\n cal.add(\"X-WR-CALDESC\", \"中国法定节假日数据,自动每日抓取国务院公告。\")\n cal.add(\"VERSION\", \"2.0\")\n cal.add(\"METHOD\", \"PUBLISH\")\n cal.add(\"CLASS\", \"PUBLIC\")\n\n cal.add_component(_create_timezone())\n\n days = sorted(days, key=lambda x: x[\"date\"])\n\n for fr, to in _iter_date_ranges(days):\n start = _cast_date(fr[\"date\"])\n end = _cast_date(to[\"date\"]) + datetime.timedelta(days=1)\n\n name = fr[\"name\"] + \"假期\"\n if not fr[\"isOffDay\"]:\n name = \"上班(补\" + name + \")\"\n cal.add_component(_create_event(name, start, end))\n\n with open(filename, \"wb\") as f:\n f.write(cal.to_ical())", "def create_ical_file(list_of_events, strasse, hausnummer):\n cal = Calendar()\n\n # Some properties are required to be compliant:\n cal.add('prodid', '-//My calendar product//mxm.dk//')\n cal.add('version', '2.0')\n\n global total_number_of_events\n total_number_of_events = len(list_of_events)\n\n all_ical_events = create_cal_events(list_of_events, strasse, hausnummer)\n for evnt in all_ical_events:\n # Add the event to the calendar:\n cal.add_component(evnt)\n\n cal_as_ical = cal.to_ical()\n create_folder_if_not_exists()\n # Write iCal file to disk\n return save_ical_file(cal_as_ical, get_filename(strasse, hausnummer))", "def as_ical(self):\n if self.date_is_approximate:\n return None\n\n ymd = (self.date.year, self.date.month, self.date.day)\n event_date = date(*ymd)\n event = icalendar.Event()\n event.add(\"dtstart\", event_date)\n event.add(\"dtend\", event_date + timedelta(days=1))\n event.add(\"uid\", self.ical_uid)\n event.add(\"summary\", \"Django Girls 
%s\" % self.city)\n event.add(\"location\", f\"{self.country}, {self.city}\")\n return event", "def writeIcal(calendarItems):\n\n cal = Calendar()\n cal.add('prodid', '-//Gremien Kalender//opendata.stadt-muenster.de//')\n cal.add('version', '2.0')\n\n with open(OUTPUT_FILE_CSV, 'w', newline='') as csvfile:\n csvWriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csvWriter.writerow(['MeetingID', 'Start', 'Ende', 'Gremium', 'Veranstaltung', 'Ort', 'Weitere Information'])\n\n for key, session in sorted(calendarItems.items()):\n\n # Prepare event title (and convert datestrings to datetime objects with timezone)\n meetingId = session[5]\n sessionName = session[2]\n committee = session[3]\n location = session[4]\n start = datetime.strptime(session[0], \"%Y-%m-%dT%H:%M:%S%z\")\n end = datetime.strptime(session[1], \"%Y-%m-%dT%H:%M:%S%z\")\n meetingUrl = OPARL_MEETING_URL.format(meetingId)\n logging.info(\"Adding ical: %s %s %s\", start, committee, sessionName)\n\n # Create ical event (and convert datetimes to UTC)\n event = Event()\n event.add('summary', '{} - {}'.format(committee, sessionName))\n event.add('dtstart', start.astimezone(pytz.utc))\n event.add('dtend', end.astimezone(pytz.utc))\n event.add('dtstamp', datetime.now())\n event.add('description', meetingUrl)\n event.add('uid', '20220215T101010/{}@ms'.format(meetingId))\n\n organizer = vCalAddress('MAILTO:[email protected]')\n organizer.params['cn'] = vText('Stadt Münster')\n organizer.params['role'] = vText('Ratsinformationssytem')\n event['organizer'] = organizer\n event['location'] = vText(location)\n\n # Add event to calendar\n cal.add_component(event)\n\n # Add event to CSV\n csvWriter.writerow([meetingId, str(start), str(end), committee, sessionName, location, meetingUrl])\n\n\n # Write ical file\n f = open(OUTPUT_FILE_ICS, 'wb')\n f.write(cal.to_ical())\n f.close()", "def response(self):\n response = HttpResponse(self.cal.to_ical(), content_type='text/calendar')\n response['Content-Type'] = 'text/calendar; charset=utf-8'\n response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics'\n return response", "def response(self):\n response = HttpResponse(self.cal.to_ical(), content_type='text/calendar')\n response['Content-Type'] = 'text/calendar; charset=utf-8'\n response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics'\n return response", "def output(self):\n return self.cal.to_ical()", "def output(self):\n return self.cal.to_ical()", "def calendar_for_event_description(ed):\n return icemac.ab.calendar.interfaces.ICalendar(ed.context)", "def ical(self) -> Calendar:\n cal = Calendar()\n event = IEvent()\n event.add(\"summary\", \"Video Chat\")\n event.add(\"dtstart\", self.start)\n cal.add_component(event)\n return cal.to_ical()", "def export_calendar(calendar, filename):\n filename = verify_filename(filename)\n try:\n with open(filename, 'w') as file:\n file.writelines(calendar)\n print(f\"File {filename} created in current directory.\")\n except OSError:\n print(f\"Could not open/read {filename}\")\n sys.exit(1)", "def get_ical_url(self):\n return reverse(\n \"events:ical\",\n kwargs={\n \"year\": self.start_time.year,\n # force two-digit month\n \"month\": \"%02d\" % self.start_time.month,\n \"slug\": self.slug,\n },\n )", "def agenda_ical(request, num=None, name=None, acronym=None, session_id=None):\n meeting = get_meeting(num, type_in=None)\n schedule = get_schedule(meeting, name)\n updated = meeting.updated()\n\n if schedule is None and acronym is 
None and session_id is None:\n raise Http404\n\n assignments = SchedTimeSessAssignment.objects.filter(\n schedule__in=[schedule, schedule.base],\n session__on_agenda=True,\n )\n assignments = preprocess_assignments_for_agenda(assignments, meeting)\n AgendaKeywordTagger(assignments=assignments).apply()\n\n try:\n filt_params = parse_agenda_filter_params(request.GET)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n if filt_params is not None:\n # Apply the filter\n assignments = [a for a in assignments if should_include_assignment(filt_params, a)]\n\n if acronym:\n assignments = [ a for a in assignments if a.session.historic_group and a.session.historic_group.acronym == acronym ]\n elif session_id:\n assignments = [ a for a in assignments if a.session_id == int(session_id) ]\n\n for a in assignments:\n if a.session:\n a.session.ical_status = ical_session_status(a)\n\n return render(request, \"meeting/agenda.ics\", {\n \"schedule\": schedule,\n \"assignments\": assignments,\n \"updated\": updated\n }, content_type=\"text/calendar\")", "def read_ics(self, filename, lat_long_data):\n with open(os.path.join(self.zoneinfo_path, filename), \"r\") as zone:\n zoneinfo = zone.readlines()\n\n with open(os.path.join(self.zoneinfo_pure_path, filename), \"r\") as zone:\n zoneinfo_pure = zone.readlines()\n\n ics_data = []\n for i in range(0, len(zoneinfo)):\n line = zoneinfo[i]\n key = line[:line.find(\":\")]\n\n if key == \"BEGIN\":\n if line != \"BEGIN:VCALENDAR\\r\\n\":\n ics_data.append(line)\n elif key == \"END\":\n if line != \"END:VCALENDAR\\r\\n\":\n ics_data.append(line)\n elif key in (\"TZID\", \"TZOFFSETFROM\", \"TZOFFSETTO\", \"TZNAME\", \"DTSTART\"):\n ics_data.append(line)\n elif key == \"RRULE\":\n if line == zoneinfo_pure[i]:\n ics_data.append(line)\n else:\n sys.stderr.write(\"Using pure version of %s\\n\" % filename[:-4])\n ics_data.append(zoneinfo_pure[i])\n\n zone_data = {\n \"ics\": \"\".join(ics_data).rstrip()\n }\n zone_name = filename[:-4]\n if zone_name in lat_long_data:\n zone_data[\"latitude\"] = lat_long_data[zone_name][0]\n zone_data[\"longitude\"] = lat_long_data[zone_name][1]\n\n return zone_data", "def save_calendar(calendar):\n with open(\"calendar.txt\", \"w\") as fl:\n ls_str = \"\"\n keys = list(calendar.keys())[::-1]\n for key in keys:\n ls_str += f\"{key}:\"\n for ev in calendar[key]:\n ls_str += f\"{str(ev['start']).zfill(2)}-{str(ev['end']).zfill(2)} {ev['title']}\\t\"\n ls_str = ls_str[:-1]\n ls_str += \"\\n\"\n fl.write(ls_str)\n return True", "def get_current_events():\n resp = requests.get(ICAL_FEED)\n if resp.status_code != 200:\n logger.error('> Error retrieving iCal feed!')\n return None\n\n try:\n cal = ics.Calendar(resp.text)\n except Exception as e:\n logger.error('> Error parsing iCal data ({})'.format(e))\n return None\n\n return cal", "def modify_cal(cal, convert_dic):\n new_cal = Calendar()\n for elm in cal.walk():\n if elm.name == \"VEVENT\":\n event = elm\n event[\"summary\"] = convert_dic[str(elm.get(\"summary\"))] \n new_cal.add_component(event)\n return new_cal", "def show_events(usrservice,calservice):\r\n print(args.action, args.inuser, 'celendar events')", "def test_convert_to_cal_header(caltype, obj, change_working_dir):\n # A random NIRI image\n ad = astrodata.open(astrodata.testing.download_from_archive('N20200127S0023.fits'))\n ad_out = gt.convert_to_cal_header(ad, caltype=caltype, keyword_comments=keyword_comments)\n\n # FITS WCS keywords only get changed at write-time, so we need to\n # write the file to disk and read 
it back in to confirm.\n with change_working_dir():\n ad_out.write(\"temp.fits\", overwrite=True)\n ad = astrodata.open(\"temp.fits\")\n\n assert ad.observation_type() == caltype.upper()\n # Let's not worry about upper/lowercase\n assert ad.object().upper() == obj.upper()\n\n assert ad.phu.get('RA', 0.) == ad.phu.get('DEC', 0.0) == 0.0\n\n assert ad.ra() == ad.dec() == 0.0", "def read_edf(fname, what='events'):\n if not os.path.isfile(fname):\n err = \"%r is not a file.\" % fname\n raise ValueError(err)\n\n temp_dir = tempfile.mkdtemp()\n\n # edf2asc does not seem to handle spaces in filenames?\n if ' ' in fname:\n dst = os.path.join(temp_dir, os.path.basename(fname).replace(' ', '_'))\n# shutil.copy(fname, dst)\n os.symlink(fname, dst)\n fname = dst\n\n # construct the conversion command\n cmd = [get_bin('edfapi', 'edf2asc'), # options in Manual p. 106\n '-t', ] # use only tabs as delimiters\n if what == 'events':\n cmd.append('-e') # outputs event data only\n elif what == 'samples':\n cmd.append('-s') # outputs sample data only\n elif what == 'all':\n raise NotImplementedError()\n else:\n raise ValueError(\"what must be 'events' or 'samples', not %r\" % what)\n\n cmd.extend(('-nst', # blocks output of start events\n '-p', temp_dir, # writes output with same name to <path> directory\n fname))\n\n # run the subprocess\n p = subprocess.Popen(cmd)\n stdout, stderr = p.communicate()\n # Don't check return code because it always return 255\n\n # find asc file\n name, _ = os.path.splitext(os.path.basename(fname))\n ascname = os.path.extsep.join((name, 'asc'))\n asc_path = os.path.join(temp_dir, ascname)\n if not os.path.exists(asc_path):\n print(\"======\\nstdout\\n======\\n%s\" % stdout)\n print(\"======\\nstderr\\n======\\n%s\" % stderr)\n raise subprocess.CalledProcessError(p.returncode, cmd, (stdout, stderr))\n with open(asc_path) as asc_file:\n asc_str = asc_file.read()\n\n # clean\n shutil.rmtree(temp_dir)\n return asc_str", "def __str__(self):\n return str(self.GetCalendarString())", "def __str__(self):\n return str(self.GetCalendarString())", "def format_obsid_as_calendar_date(obsid):\n from astropy.time import Time\n t = Time(obsid, format='gps')\n return t.utc.iso[:10]", "def download_fetch_ical(self, uri, force=False):\n if self.calendar is None or force:\n tmpfile = os.path.join(os.path.dirname(self.ourpath), 'calcache.ics')\n fetchit = lambda: urllib.urlretrieve(uri, tmpfile)\n\n # Variables to check when the .ics file was last modified\n now = datetime.datetime.now(self.localtime)\n delta = datetime.timedelta(days=1)\n\n # If a cache file exists, lets check to see if it's younger than a day\n if os.path.exists(tmpfile):\n modtime = datetime.datetime.fromtimestamp(os.path.getmtime(tmpfile), now.tzinfo)\n one_day = datetime.timedelta(days=1)\n delttt=now-one_day\n\n # If we're older than one day, delete it and get a fresh one.\n if modtime < delttt:\n os.remove(tmpfile)\n fetchit()\n\n else:\n fetchit()\n\n # Finally load in the .ics file\n self.calendar = vobject.readOne(open(tmpfile).read())", "def create_vcal_string(start_date, location, content, uid):\n \n time_delta = settings.REMINDER_TIME_BEFORE_APPOINTMENT\n #need time difference in minutes for alarm\n alarm_time = start_date - time_delta\n end_date = start_date + settings.DEFAULT_APPOINTMENT_DURATION\n \n vcal_data = 
\\\n\"\"\"BEGIN:VCALENDAR\nVERSION:1.0\nBEGIN:VEVENT\nUID:%(uid)s\nDTSTART:%(start)s\nDTEND:%(end)s\nDESCRIPTION:%(content)s\nSUMMARY:%(content)s\nDTSTAMP:%(stamp)s\nLOCATION:%(location)s\nDALARM:%(alarm)s\nAALARM:%(alarm)s\nEND:VEVENT\nEND:VCALENDAR\"\"\" % {\\\n 'uid': uid,\n 'content': content,\n 'location': location,\n 'start': start_date.strftime(\"%Y%m%dT%H%M%S\"),\n 'end': end_date.strftime(\"%Y%m%dT%H%M%S\"),\n 'stamp': datetime.now().strftime(\"%Y%m%dT%H%M%S\"), \n 'alarm': alarm_time.strftime(\"%Y%m%dT%H%M%S\")}\n \n return vcal_data", "def events(self, calendar=\"\", date=\"\"):\n command = list(CALENDAR_COMMAND)\n\n if calendar:\n command.extend([\"-f\",\n join(self.calendar_directory,\n \"calendar.{}\".format(calendar))])\n\n if date:\n command.extend([\"-t\", \"{}\".format(date)])\n\n calendar_output = subprocess.check_output(command).decode(\"utf-8\")\n # Split the lines and filter the empty lines.\n lines = [line for line in calendar_output.split(\"\\n\") if line]\n lines_copy = list(lines)\n index = 0\n for event in lines:\n if event.startswith(\"\\t\") or event.startswith(\" \"):\n # This line is a continuation of the previous one.\n lines_copy[index - 1] += event\n else:\n lines_copy[index] = event\n index += 1\n\n # Substitute multiple whitespaces by one space.\n events = [' '.join(event.split()) for event in lines_copy[:index]]\n\n # Replace '&' by 'and' because PicoTTS pronounces it as 'ampersand'.\n # See https://github.com/snipsco/snips-issues/issues/85\n events = [event.replace('&', 'and') for event in events]\n\n # Create a sentence with the date and a new sentence with the description.\n # Strip the asterisk (*) after a date. This means the date changes from year to year.\n return [event[:6] + '.' + event[6:].strip(\"*\") for event in events]" ]
[ "0.7014175", "0.6851397", "0.66403925", "0.66294414", "0.66041636", "0.6558786", "0.6377986", "0.6361551", "0.6311977", "0.6311977", "0.61969876", "0.61969876", "0.60658556", "0.59038055", "0.55257285", "0.5499122", "0.5488041", "0.54850715", "0.5362488", "0.53586525", "0.53342855", "0.5293092", "0.5285512", "0.5251795", "0.51807547", "0.51807547", "0.5171278", "0.5132838", "0.5115117", "0.5105002" ]
0.73986226
0
The ID of the Amazon EMR Studio Engine security group. The Engine security group allows inbound network traffic from the Workspace security group, and it must be in the same VPC specified by `vpc_id`.
def engine_security_group_id(self) -> pulumi.Input[str]: return pulumi.get(self, "engine_security_group_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def engine_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def engine_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def workspace_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def workspace_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def workspace_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def sg_lookup(session, vpc_id, group_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [group_name]}])\n\n if len(response['SecurityGroups']) == 0:\n return None\n else:\n return response['SecurityGroups'][0]['GroupId']", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self):\n return self._vpc_id" ]
[ "0.68906003", "0.68042195", "0.68042195", "0.6783754", "0.6681128", "0.6681128", "0.6622851", "0.6622851", "0.65399075", "0.6496629", "0.6394589", "0.63780427", "0.63421786", "0.63421786", "0.63421786", "0.63421786", "0.63421786", "0.63421786", "0.62686855", "0.62686855", "0.62686855", "0.62686855", "0.62686855", "0.6239061", "0.6239061", "0.6239061", "0.6239061", "0.6239061", "0.6239061", "0.6166124" ]
0.7011863
0
The IAM role that the Amazon EMR Studio assumes. The service role provides a way for Amazon EMR Studio to interoperate with other Amazon Web Services services.
def service_role(self) -> pulumi.Input[str]: return pulumi.get(self, "service_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service_role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_role\")", "def service_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_role\")", "def service_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_role\")", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def use_iam_role():\n session = Session(region_name='us-east-1')\n return session", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n return self._role", "def get_role(self):\n return self.role", "def role(self):\n\n return self._role", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def _get_role(self):\n return self.__role", "def assume_role():\n try:\n return boto3.client('sts')\n except Exception as error:\n logger.info(\"Creating a boto client failed with the following error : {}\".format(error))", "def i_am(user_role):\n return user_role", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")" ]
[ "0.75329775", "0.7217653", "0.7217653", "0.70248497", "0.7010139", "0.6921028", "0.69086546", "0.69047856", "0.6863307", "0.68553287", "0.6809839", "0.67654437", "0.67654437", "0.67654437", "0.67166924", "0.66412354", "0.66270715", "0.66270715", "0.66270715", "0.6617036", "0.6614689", "0.65675706", "0.6567537", "0.6567537", "0.65599304", "0.6536063", "0.6523087", "0.64937186", "0.64937186", "0.64937186" ]
0.74035543
1
The ID of the Amazon EMR Studio Workspace security group. The Workspace security group allows outbound network traffic to resources in the Engine security group, and it must be in the same VPC specified by `vpc_id`.
def workspace_security_group_id(self) -> pulumi.Input[str]: return pulumi.get(self, "workspace_security_group_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def workspace_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def workspace_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def engine_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def engine_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def sg_lookup(session, vpc_id, group_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [group_name]}])\n\n if len(response['SecurityGroups']) == 0:\n return None\n else:\n return response['SecurityGroups'][0]['GroupId']", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def vpc_id(self):\n return self._vpc_id", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[str]:\n return pulumi.get(self, \"vpc_id\")" ]
[ "0.72309697", "0.7065785", "0.67988247", "0.67988247", "0.6768797", "0.6768797", "0.6625691", "0.6625691", "0.64094466", "0.64094466", "0.64094466", "0.64094466", "0.64094466", "0.64094466", "0.638914", "0.6377499", "0.6354188", "0.6262482", "0.6262482", "0.6262482", "0.6262482", "0.6262482", "0.62619233", "0.625823", "0.625823", "0.625823", "0.625823", "0.625823", "0.625823", "0.61999357" ]
0.7212722
1
The authentication endpoint of your identity provider (IdP). Specify this value when you use IAM authentication and want to let federated users log in to a Studio with the Studio URL and credentials from your IdP. Amazon EMR Studio redirects users to this endpoint to enter credentials.
def idp_auth_url(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "idp_auth_url")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idp_auth_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"idp_auth_url\")", "def auth_url(self):\n\n return \"{}?client_id={}&redirect_uri={}&scope={}&state={}\".format(AUTH_ENDPOINT, self.client_id,\\\n self.redirect_uri, self.scope, self.state)", "def auth(self) -> Optional[pulumi.Input['IstioConfigAuth']]:\n return pulumi.get(self, \"auth\")", "def identity_provider(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider\")", "def auth_url(self):\n return_url = self.redirect_uri\n nonce = self.strategy.random_string(64)\n self.add_nonce(nonce)\n\n payload = urlencode({\"nonce\": nonce, \"return_sso_url\": return_url})\n base_64_payload = urlsafe_b64encode(payload.encode(\"utf8\")).decode(\"ascii\")\n\n payload_signature = hmac.new(\n self.setting(\"SECRET\").encode(\"utf8\"),\n base_64_payload.encode(\"utf8\"),\n sha256,\n ).hexdigest()\n encoded_params = urlencode({\"sso\": base_64_payload, \"sig\": payload_signature})\n return f\"{self.get_idp_url()}?{encoded_params}\"", "def login_uri(self) -> str:\n return pulumi.get(self, \"login_uri\")", "def auth_code_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_code_url\")", "def datasource_auth_credentials(self) -> Optional[pulumi.Input['SecretStoreBasedAuthCredentialsArgs']]:\n return pulumi.get(self, \"datasource_auth_credentials\")", "def login_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"login_uri\")", "def datasource_auth_credentials(self) -> Optional['outputs.SecretStoreBasedAuthCredentialsResponse']:\n return pulumi.get(self, \"datasource_auth_credentials\")", "def multi_factor_auth_provider(self) -> str:\n return pulumi.get(self, \"multi_factor_auth_provider\")", "def auth_server_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_server_id\")", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def identity_provider_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def login_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"login_uri\")", "def login_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"login_uri\")", "def getauthenticationurl():\n\n # read application keys from env variables\n appid, appsecret, redirecturi = readappkeys()\n\n # generate random state string for request (for security)\n state = ''.join(random.choice(string.ascii_lowercase + string.digits \n + string.ascii_uppercase) for i in range(20))\n\n # need access to public and private playlists\n scope = (\"playlist-read-private playlist-read-collaborative\" \n + \" playlist-modify-public playlist-modify-private\")\n\n # load request parameters\n payload = {}\n payload[\"client_id\"] = appid\n payload[\"response_type\"] = \"code\"\n payload[\"redirect_uri\"] = redirecturi\n payload[\"state\"] = state\n payload[\"scope\"] = scope\n\n # create URL\n r = requests.Request(\"GET\",\n \"https://accounts.spotify.com/authorize/\",\n params=payload)\n prepped = r.prepare()\n\n # return state and url\n if state and prepped.url:\n # print(\"prepped.url: {}\".format(prepped.url))\n return(state, prepped.url)\n else:\n return(None, None)", "def get_sp_auth_url(self, session, sp_id, **kwargs):\n return None", "def identity_provider_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider_name\")", "def idp_metadata_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_metadata_url\")", 
"def idp_metadata_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_metadata_url\")", "def auth_config_id(self) -> Sequence[str]:\n return pulumi.get(self, \"auth_config_id\")", "def service_auth(self) -> Optional[pulumi.Input['ServiceAuthConfigurationArgs']]:\n return pulumi.get(self, \"service_auth\")", "def initiateAuthentication(identity_url, return_to=None):", "def auth_server_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_server_id\")", "def auth(self):\n return self.creds(\"[email protected]\", cookie=\"USERTOKEN: authcookie\")", "def auth_server_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_server_id\")", "def authorization_url(self): # pragma: no cover\n raise NotImplementedError()", "def auth_config(self):\n return {\n 'login_url': self.uri_for('login'),\n 'logout_url': self.uri_for('logout')\n }", "def auth_mode(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_mode\")" ]
[ "0.73342144", "0.61421776", "0.59861565", "0.5918602", "0.58944696", "0.56724465", "0.5659786", "0.5592917", "0.55907595", "0.55823594", "0.55823416", "0.5576239", "0.55256826", "0.5524355", "0.55210763", "0.55210763", "0.5517178", "0.55091786", "0.5467416", "0.5406468", "0.5406468", "0.537967", "0.53787386", "0.53748184", "0.53585744", "0.53346306", "0.5327957", "0.53216434", "0.5293713", "0.52918005" ]
0.7340292
1
The name that your identity provider (IdP) uses for its RelayState parameter. For example, RelayState or TargetSource. Specify this value when you use IAM authentication and want to let federated users log in to a Studio using the Studio URL. The RelayState parameter differs by IdP.
def idp_relay_state_parameter_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "idp_relay_state_parameter_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idp_relay_state_parameter_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"idp_relay_state_parameter_name\")", "def relay_state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relay_state\")", "def relay_state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relay_state\")", "def state_name(self):\n return self.state.name if self.state else None", "def relay_state(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"relay_state\")", "def _state_name(self):\n return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)", "def identity_provider_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def relay_state(self,name):\n state = self.circles[name].get_info()['relay_state']\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=%s WHERE target=%s\"\"\", (state, name))\n return state", "def state(self, state: str) -> None:", "def name(self) -> Text:\n\n return \"5knn_state\"", "def identity_provider_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider_name\")", "def state_id(self):\n return self._state_id", "def state(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"state\")", "def state_pk(self):", "def identity_provider_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state_name(self):\n return task_states.to_str(self.state)", "def _initial_state_scope(self, name):\n return name if name else \"%s_initial_state\" % self.scope_name", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")" ]
[ "0.7687181", "0.59368676", "0.59368676", "0.56802654", "0.5628786", "0.5575582", "0.5529944", "0.54963726", "0.533518", "0.52946424", "0.5269813", "0.51703966", "0.51690614", "0.5145018", "0.5112524", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.50896215", "0.5066625", "0.5036126", "0.49700874", "0.49700874" ]
0.7885242
0
The IAM user role that users and groups assume when logged in to an Amazon EMR Studio. Only specify a User Role when you use Amazon Web Services SSO authentication. The permissions attached to the User Role can be scoped down for each user or group using session policies.
def user_role(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "user_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")", "def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def get_role(self):\n return self.role", "def get_user_role():\n\n if session['user_role'] == 'student':\n return student\n elif session['user_role'] == 'tutor':\n return tutor\n else:\n raise Exception(\"User is not student or tutor. Who is user?\")", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n\n return self._role", "def assume_role(role_arn):\n sts = boto3.client('sts')\n role_data = sts.assume_role(\n RoleArn=role_arn, RoleSessionName=\"assume_role_name\")\n creds = role_data['Credentials']\n return creds", "def assign_user_role(self, project_id, user_id, role_id):\n resp, body = self.put('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "def get_user_roles(user=None):\n if user is None:\n user = g.user\n return user.roles", "def i_am(user_role):\n return user_role", "def show_user_role(instance_id):\n client = get_client()\n\n return client.get_current_user_role(instance_id=instance_id,\n custom_headers=get_custom_headers())", "def user_roles():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user roles\")\n user_roles = get_user_roles(access_token)\n return json.dumps(list(user_roles))", "def get_user_role(user, course_key):\r\n if is_masquerading_as_student(user):\r\n return 'student'\r\n elif has_access(user, 'instructor', course_key):\r\n return 'instructor'\r\n elif has_access(user, 'staff', course_key):\r\n return 'staff'\r\n else:\r\n return 'student'", "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def assign_user_role_on_project(self, project_id, user_id, role_id):\n resp, body = self.put('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")" ]
[ "0.7068165", "0.66357064", "0.6477955", "0.6413195", "0.6339574", "0.6304233", "0.62071526", "0.6118527", "0.61078304", "0.60794014", "0.60794014", "0.60794014", "0.603479", "0.603247", "0.6029019", "0.59861964", "0.5954648", "0.595334", "0.5868651", "0.5853171", "0.58490384", "0.58210117", "0.5773206", "0.5773206", "0.5773206", "0.5765921", "0.57407916", "0.5671021", "0.5671021", "0.5654012" ]
0.7068885
1
The authentication endpoint of your identity provider (IdP). Specify this value when you use IAM authentication and want to let federated users log in to a Studio with the Studio URL and credentials from your IdP. Amazon EMR Studio redirects users to this endpoint to enter credentials.
def idp_auth_url(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "idp_auth_url")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idp_auth_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"idp_auth_url\")", "def auth_url(self):\n\n return \"{}?client_id={}&redirect_uri={}&scope={}&state={}\".format(AUTH_ENDPOINT, self.client_id,\\\n self.redirect_uri, self.scope, self.state)", "def auth(self) -> Optional[pulumi.Input['IstioConfigAuth']]:\n return pulumi.get(self, \"auth\")", "def identity_provider(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider\")", "def auth_url(self):\n return_url = self.redirect_uri\n nonce = self.strategy.random_string(64)\n self.add_nonce(nonce)\n\n payload = urlencode({\"nonce\": nonce, \"return_sso_url\": return_url})\n base_64_payload = urlsafe_b64encode(payload.encode(\"utf8\")).decode(\"ascii\")\n\n payload_signature = hmac.new(\n self.setting(\"SECRET\").encode(\"utf8\"),\n base_64_payload.encode(\"utf8\"),\n sha256,\n ).hexdigest()\n encoded_params = urlencode({\"sso\": base_64_payload, \"sig\": payload_signature})\n return f\"{self.get_idp_url()}?{encoded_params}\"", "def login_uri(self) -> str:\n return pulumi.get(self, \"login_uri\")", "def auth_code_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_code_url\")", "def datasource_auth_credentials(self) -> Optional[pulumi.Input['SecretStoreBasedAuthCredentialsArgs']]:\n return pulumi.get(self, \"datasource_auth_credentials\")", "def login_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"login_uri\")", "def datasource_auth_credentials(self) -> Optional['outputs.SecretStoreBasedAuthCredentialsResponse']:\n return pulumi.get(self, \"datasource_auth_credentials\")", "def multi_factor_auth_provider(self) -> str:\n return pulumi.get(self, \"multi_factor_auth_provider\")", "def auth_server_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_server_id\")", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def identity_provider_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def login_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"login_uri\")", "def login_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"login_uri\")", "def getauthenticationurl():\n\n # read application keys from env variables\n appid, appsecret, redirecturi = readappkeys()\n\n # generate random state string for request (for security)\n state = ''.join(random.choice(string.ascii_lowercase + string.digits \n + string.ascii_uppercase) for i in range(20))\n\n # need access to public and private playlists\n scope = (\"playlist-read-private playlist-read-collaborative\" \n + \" playlist-modify-public playlist-modify-private\")\n\n # load request parameters\n payload = {}\n payload[\"client_id\"] = appid\n payload[\"response_type\"] = \"code\"\n payload[\"redirect_uri\"] = redirecturi\n payload[\"state\"] = state\n payload[\"scope\"] = scope\n\n # create URL\n r = requests.Request(\"GET\",\n \"https://accounts.spotify.com/authorize/\",\n params=payload)\n prepped = r.prepare()\n\n # return state and url\n if state and prepped.url:\n # print(\"prepped.url: {}\".format(prepped.url))\n return(state, prepped.url)\n else:\n return(None, None)", "def get_sp_auth_url(self, session, sp_id, **kwargs):\n return None", "def identity_provider_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider_name\")", "def idp_metadata_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_metadata_url\")", 
"def idp_metadata_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_metadata_url\")", "def service_auth(self) -> Optional[pulumi.Input['ServiceAuthConfigurationArgs']]:\n return pulumi.get(self, \"service_auth\")", "def auth_config_id(self) -> Sequence[str]:\n return pulumi.get(self, \"auth_config_id\")", "def initiateAuthentication(identity_url, return_to=None):", "def auth_server_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_server_id\")", "def auth(self):\n return self.creds(\"[email protected]\", cookie=\"USERTOKEN: authcookie\")", "def auth_server_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_server_id\")", "def authorization_url(self): # pragma: no cover\n raise NotImplementedError()", "def auth_config(self):\n return {\n 'login_url': self.uri_for('login'),\n 'logout_url': self.uri_for('logout')\n }", "def auth_mode(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_mode\")" ]
[ "0.733513", "0.61433494", "0.5987851", "0.5917364", "0.58954287", "0.5673445", "0.56609595", "0.55953974", "0.5591695", "0.55847", "0.558439", "0.55766195", "0.5527152", "0.5522925", "0.55222666", "0.55222666", "0.5518764", "0.55106723", "0.5466359", "0.54050314", "0.54050314", "0.5382189", "0.5381047", "0.53749883", "0.5359339", "0.53366625", "0.5328409", "0.5321981", "0.5295973", "0.52953035" ]
0.7341372
0
The name that your identity provider (IdP) uses for its RelayState parameter. For example, RelayState or TargetSource. Specify this value when you use IAM authentication and want to let federated users log in to a Studio using the Studio URL. The RelayState parameter differs by IdP.
def idp_relay_state_parameter_name(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "idp_relay_state_parameter_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idp_relay_state_parameter_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"idp_relay_state_parameter_name\")", "def relay_state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relay_state\")", "def relay_state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relay_state\")", "def state_name(self):\n return self.state.name if self.state else None", "def relay_state(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"relay_state\")", "def _state_name(self):\n return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)", "def identity_provider_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def relay_state(self,name):\n state = self.circles[name].get_info()['relay_state']\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=%s WHERE target=%s\"\"\", (state, name))\n return state", "def state(self, state: str) -> None:", "def name(self) -> Text:\n\n return \"5knn_state\"", "def identity_provider_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider_name\")", "def state_id(self):\n return self._state_id", "def state(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"state\")", "def state_pk(self):", "def identity_provider_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[str]:\n return pulumi.get(self, \"state\")", "def state_name(self):\n return task_states.to_str(self.state)", "def _initial_state_scope(self, name):\n return name if name else \"%s_initial_state\" % self.scope_name", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")" ]
[ "0.768848", "0.593493", "0.593493", "0.5682287", "0.562658", "0.5578043", "0.5532136", "0.549397", "0.5336049", "0.5295124", "0.52722514", "0.5171025", "0.5170942", "0.5145687", "0.51145095", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5092392", "0.5068688", "0.5037265", "0.49721316", "0.49721316" ]
0.7886542
1
The IAM user role that users and groups assume when logged in to an Amazon EMR Studio. Only specify a User Role when you use Amazon Web Services SSO authentication. The permissions attached to the User Role can be scoped down for each user or group using session policies.
def user_role(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "user_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")", "def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def get_role(self):\n return self.role", "def get_user_role():\n\n if session['user_role'] == 'student':\n return student\n elif session['user_role'] == 'tutor':\n return tutor\n else:\n raise Exception(\"User is not student or tutor. Who is user?\")", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n\n return self._role", "def assume_role(role_arn):\n sts = boto3.client('sts')\n role_data = sts.assume_role(\n RoleArn=role_arn, RoleSessionName=\"assume_role_name\")\n creds = role_data['Credentials']\n return creds", "def assign_user_role(self, project_id, user_id, role_id):\n resp, body = self.put('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "def get_user_roles(user=None):\n if user is None:\n user = g.user\n return user.roles", "def i_am(user_role):\n return user_role", "def show_user_role(instance_id):\n client = get_client()\n\n return client.get_current_user_role(instance_id=instance_id,\n custom_headers=get_custom_headers())", "def user_roles():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user roles\")\n user_roles = get_user_roles(access_token)\n return json.dumps(list(user_roles))", "def get_user_role(user, course_key):\r\n if is_masquerading_as_student(user):\r\n return 'student'\r\n elif has_access(user, 'instructor', course_key):\r\n return 'instructor'\r\n elif has_access(user, 'staff', course_key):\r\n return 'staff'\r\n else:\r\n return 'student'", "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def assign_user_role_on_project(self, project_id, user_id, role_id):\n resp, body = self.put('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")" ]
[ "0.7068214", "0.66350424", "0.64774257", "0.64126813", "0.63391656", "0.6303802", "0.620743", "0.61188483", "0.6108174", "0.6079462", "0.6079462", "0.6079462", "0.6034469", "0.6032965", "0.6026925", "0.5984384", "0.5955966", "0.59547716", "0.5867706", "0.5852826", "0.5848336", "0.5821335", "0.5773713", "0.5773713", "0.5773713", "0.57640165", "0.57401574", "0.56706715", "0.56706715", "0.56534487" ]
0.7069238
0
Get an existing Studio resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        arn: Optional[pulumi.Input[str]] = None,
        auth_mode: Optional[pulumi.Input[str]] = None,
        default_s3_location: Optional[pulumi.Input[str]] = None,
        description: Optional[pulumi.Input[str]] = None,
        engine_security_group_id: Optional[pulumi.Input[str]] = None,
        idp_auth_url: Optional[pulumi.Input[str]] = None,
        idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        service_role: Optional[pulumi.Input[str]] = None,
        subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        url: Optional[pulumi.Input[str]] = None,
        user_role: Optional[pulumi.Input[str]] = None,
        vpc_id: Optional[pulumi.Input[str]] = None,
        workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _StudioState.__new__(_StudioState)
    __props__.__dict__["arn"] = arn
    __props__.__dict__["auth_mode"] = auth_mode
    __props__.__dict__["default_s3_location"] = default_s3_location
    __props__.__dict__["description"] = description
    __props__.__dict__["engine_security_group_id"] = engine_security_group_id
    __props__.__dict__["idp_auth_url"] = idp_auth_url
    __props__.__dict__["idp_relay_state_parameter_name"] = idp_relay_state_parameter_name
    __props__.__dict__["name"] = name
    __props__.__dict__["service_role"] = service_role
    __props__.__dict__["subnet_ids"] = subnet_ids
    __props__.__dict__["tags"] = tags
    __props__.__dict__["tags_all"] = tags_all
    __props__.__dict__["url"] = url
    __props__.__dict__["user_role"] = user_role
    __props__.__dict__["vpc_id"] = vpc_id
    __props__.__dict__["workspace_security_group_id"] = workspace_security_group_id
    return Studio(resource_name, opts=opts, __props__=__props__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200", "def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())", "def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)", "def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state", "def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)", "def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)", "def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)", "def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response", "def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)", "def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())", "def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n 
__props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)", "def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)", "def statesById(state_id):\n obj = storage.get(State, state_id)\n if obj:\n return jsonify(obj.to_dict())\n return jsonify({\"error\": \"Not found\"}), 404", "def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)", "def given_state(id):\n key = 'State.{}'.format(id)\n state = storage.all(State).get(key)\n return render_template('9-states.html', states=state)", "def get_state_by_id(states: [State], state_id: str, id_type: str) -> State:\n if id_type == 'new':\n for state in states:\n if state.new_id == state_id:\n return state\n if id_type == 'old':\n for state in states:\n if state.id == state_id:\n return state\n return states[0]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n __props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)", "def state_by_id(id):\n states = storage.all('State').values()\n for state in states:\n if state.id == id:\n return render_template('9-states.html', states=state)\n return render_template('9-states.html')", "def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: 
Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(exploration_id, state_id, strict=True):\n # TODO(sll): Generalize this to handle multiple state_ids at a time.\n state_memcache_key = _get_state_memcache_key(exploration_id, state_id)\n memcached_state = memcache_services.get_multi(\n [state_memcache_key]).get(state_memcache_key)\n\n if memcached_state is not None:\n return memcached_state\n else:\n state_model = exp_models.StateModel.get(\n exploration_id, state_id, strict=strict)\n if state_model:\n state = exp_domain.State.from_dict(state_id, state_model.value)\n memcache_services.set_multi({state_memcache_key: state})\n return state\n else:\n return None", "def view_state_id(state_id):\n states_obj = storage.all(\"State\")\n if request.method == 'GET':\n for state in states_obj.values():\n if state.id == state_id:\n id_found = state.to_dict()\n return jsonify(id_found)\n abort(404)\n\n if request.method == 'DELETE':\n for state in states_obj.values():\n if state.id == state_id:\n storage.delete(state)\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n if request.method == 'PUT':\n key = \"State.\" + state_id\n states = storage.all(\"State\")\n instance = states.get(key)\n if instance is None:\n abort(404)\n else:\n if not request.json:\n abort(400, \"Not a JSON\")\n req_var = request.get_json()\n for key, value in req_var.items():\n setattr(instance, key, value)\n 
storage.save()\n return make_response(jsonify(instance.to_dict()), 200)", "def states_id(id=None):\n all_states = storage.all(State)\n foundstate = None\n for key, state in all_states.items():\n if state.id == id:\n foundstate = state\n break\n\n return render_template('9-states.html', States=all_states, ID=id,\n Stateobj=foundstate)", "def states_by_id(id):\n list_states = storage.all('State')\n state_id = 'State.{}'.format(id)\n if state_id in list_states:\n list_states = list_states[state_id]\n else:\n list_states = None\n return render_template('9-states.html', list_states=list_states)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n 
__props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)", "def get_sample_state_by_id():\n ids = [] # type: list\n if demisto.getArg('ids'):\n ids += argToList(demisto.getArg('ids'))\n if demisto.getArg('id'):\n ids.append(demisto.getArg('id'))\n response = get_sample_state_helper(ids)\n md = tableToMarkdown('ThreatGrid - Sample state', response['samples'], ['ID', 'State'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': response['samples']},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': response['requests']\n })" ]
[ "0.62434953", "0.6238258", "0.61616653", "0.61550075", "0.6134168", "0.61179656", "0.6100013", "0.6011708", "0.59448004", "0.5883692", "0.5881919", "0.58268267", "0.5796979", "0.57796544", "0.5776168", "0.56969273", "0.5687934", "0.5654655", "0.56533", "0.5652628", "0.56508577", "0.56368095", "0.5549419", "0.55074567", "0.5475935", "0.5474917", "0.54556537", "0.54433745", "0.5409079", "0.53290987" ]
0.71403503
0
The ID of the Amazon EMR Studio Engine security group. The Engine security group allows inbound network traffic from the Workspace security group, and it must be in the same VPC specified by `vpc_id`.
def engine_security_group_id(self) -> pulumi.Output[str]:
    return pulumi.get(self, "engine_security_group_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def engine_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def engine_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def workspace_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def workspace_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def workspace_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def sg_lookup(session, vpc_id, group_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [group_name]}])\n\n if len(response['SecurityGroups']) == 0:\n return None\n else:\n return response['SecurityGroups'][0]['GroupId']", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self):\n return self._vpc_id" ]
[ "0.7013316", "0.68066067", "0.68066067", "0.67850715", "0.6683846", "0.6683846", "0.66251224", "0.66251224", "0.6543493", "0.6500523", "0.6398078", "0.63792205", "0.63436174", "0.63436174", "0.63436174", "0.63436174", "0.63436174", "0.63436174", "0.62710613", "0.62710613", "0.62710613", "0.62710613", "0.62710613", "0.62399656", "0.62399656", "0.62399656", "0.62399656", "0.62399656", "0.62399656", "0.6167102" ]
0.6892434
1
The IAM role that the Amazon EMR Studio assumes. The service role provides a way for Amazon EMR Studio to interoperate with other Amazon Web Services services.
def service_role(self) -> pulumi.Output[str]:
    return pulumi.get(self, "service_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service_role(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_role\")", "def service_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_role\")", "def service_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_role\")", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def use_iam_role():\n session = Session(region_name='us-east-1')\n return session", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n return self._role", "def get_role(self):\n return self.role", "def role(self):\n\n return self._role", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def _get_role(self):\n return self.__role", "def assume_role():\n try:\n return boto3.client('sts')\n except Exception as error:\n logger.info(\"Creating a boto client failed with the following error : {}\".format(error))", "def i_am(user_role):\n return user_role", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")" ]
[ "0.74046427", "0.7218937", "0.7218937", "0.70252204", "0.7010788", "0.69213706", "0.69096506", "0.6905022", "0.6863561", "0.6854463", "0.68094724", "0.6766141", "0.6766141", "0.6766141", "0.67176384", "0.66420156", "0.66270846", "0.66270846", "0.66270846", "0.6617356", "0.6613674", "0.65674615", "0.6567192", "0.6567192", "0.65611255", "0.6536758", "0.6523362", "0.6492912", "0.6492912", "0.6492912" ]
0.75349903
0
The ID of the Amazon EMR Studio Workspace security group. The Workspace security group allows outbound network traffic to resources in the Engine security group, and it must be in the same VPC specified by `vpc_id`.
def workspace_security_group_id(self) -> pulumi.Output[str]:
    return pulumi.get(self, "workspace_security_group_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def workspace_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def workspace_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def engine_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def engine_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def sg_lookup(session, vpc_id, group_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [group_name]}])\n\n if len(response['SecurityGroups']) == 0:\n return None\n else:\n return response['SecurityGroups'][0]['GroupId']", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def vpc_id(self):\n return self._vpc_id", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[str]:\n return pulumi.get(self, \"vpc_id\")" ]
[ "0.7212722", "0.7065785", "0.67988247", "0.67988247", "0.6768797", "0.6768797", "0.6625691", "0.6625691", "0.64094466", "0.64094466", "0.64094466", "0.64094466", "0.64094466", "0.64094466", "0.638914", "0.6377499", "0.6354188", "0.6262482", "0.6262482", "0.6262482", "0.6262482", "0.6262482", "0.62619233", "0.625823", "0.625823", "0.625823", "0.625823", "0.625823", "0.625823", "0.61999357" ]
0.72309697
0
Redirect an edge so that it points from the alphabetically first vertex to the last, e.g. "b a 3" will become "a b 3".
def redirect_edge_alpabetically(edge):
    if edge[0].lower() > edge[1].lower():
        msg = "{} ".format(edge)
        work = edge[0]
        edge[0] = edge[1]
        edge[1] = work
        msg += "-> {}".format(edge)
        logger.debug(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphabet(self):\n dag = Graph(self.k)\n # Building the DAG\n for i in range(len(self.word_sample) - 1):\n for a, b in zip(self.word_sample[i], self.word_sample[i + 1]):\n if a != b:\n dag.add_edge(a, b)\n break\n\n dag.print_graph()\n return dag.topology_sort().reverse()", "def prev_letter(letter, step):\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tnew_letter = get_new_letter(ascii_uppercase, letter, -step)\r\n\telif letter in ascii_lowercase:\r\n\t\tnew_letter = get_new_letter(ascii_lowercase, letter, -step)\r\n\telse:\r\n\t\tnew_letter = letter\r\n\treturn new_letter", "def swapCharacters(word):\n l = list(word)\n temp = word[-1]\n l[-1] = l[0]\n l[0] = temp\n return ''.join(l)", "def backward_character():\r\n set_point(point().offset(-1))", "def _forward(self, letter):\n\t\tself._turn_rotors()\n\t\tl = letter\n\t\tfor i in range(-1, -self.n_rotors - 1, -1):\n\t\t\tl = self._rotor_right2left(self.rotors[i], l, self.offsets[i],\n\t\t\t\t\t\t\t\t\tself.rings[i])\n\t\treturn l", "def _string_reversial(string : list, start : int, end : int): # function is inplace\n if len(string) < 2:\n return\n\n while end > start:\n string[start], string[end] = string[end], string[start]\n start += 1\n end -=1", "def clean_up_letter_a(query):\n key_words = ['line', 'route', 'bus']\n alphabet = ['b','c','d','e','f']\n num_chars = set('1234567890')\n special_cases = {\n 'dash':['dash'],\n 'nite':['nite'],\n 'one':['one'],\n 'link': ['link'], \n 'sounder':['sounder south', 'sounder north'],\n 'monorail':['monorail sc', 'monorail wl'],\n 'amtrak':['amtrak'],\n 'tlink': ['tlink'],\n 'swift':['swift blue', 'swift green'],\n 'duvall':['duvall monroe shuttle'],\n 'trailhead': ['trailhead direct mt. si','trailhead direct mailbox peak','trailhead direct cougar mt.','trailhead direct issaquah alps']\n }\n if len(query) == 1:\n return query\n override = False\n for word in range(len(query)):\n if query[word] == 'a':\n # preceding\n if query[word-1] in key_words:\n pass\n # following\n else:\n try:\n if query[word+1] in key_words:\n pass\n else:\n query[word] = ''\n except:\n query[word] = ''\n elif query[word] in alphabet or query[word] in special_cases or any(char in query[word] for char in num_chars):\n override = True\n\n if override:\n for word in range(len(query)):\n if query[word] is 'a':\n print('word is a')\n query[word] = '' \n\n return query", "def arrange_trigram(t, word):\n return t[1:] + (word,) # returns new tuple", "def alphabet_position(text):\n return ' '.join(str(ord(c) - 96) for c in text.lower() if c.isalpha())\n # return ' '.join(str(string.ascii_lowercase.index(s.lower())+1) for s in text if s.lower() in string.ascii_lowercase)", "def place_in_alphabet(letters):\r\n\tfor l in letters:\r\n\t\tprint(l, ':', str(ALPHABET.index(l)+1))", "def sortEdge(cls,x,y):\n return( Sentence.sortById(x[2],y[2]) )", "def edit_step(word):\n letters = 'abcdefghijklmnopqrstuvwxyz'\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n deletes = [L + R[1:] for L, R in splits if R]\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n replaces = [L + c + R[1:] for L, R in splits if R for c in letters]\n inserts = [L + c + R for L, R in splits for c in letters]\n return set(deletes + transposes + replaces + inserts)", "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def reverse_pair(text):\n newtext = text.split(' ')\n newtext = newtext[-1::-1]\n return ' '.join(newtext)", "def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 
'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += alpha_map[s[i]]\n i += 1\n return res", "def two_passes(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n splitted = s.split('#')\n res = ''\n\n for i in range(len(splitted)):\n j = 0\n if i + 1 < len(splitted) and len(splitted[i]) > 2:\n while j < len(splitted[i]) - 2:\n res += alpha_map[splitted[i][j]]\n j += 1\n\n if i + 1 < len(splitted):\n res += alpha_map[splitted[i][j:]]\n else:\n while j < len(splitted[i]):\n res += alpha_map[splitted[i][j]]\n j += 1\n return res", "def encode(self, letter):\n\n for plug in self.plugleads:\n if plug.pair[0] == letter or plug.pair[1] == letter:\n return plug.encode(letter)\n return letter", "def _get_letters_adjacent_cells(self, grid, x0, y0):\n letters = ''\n for cell in grid.get_adjacent_cells(x0, y0):\n letters = letters + cell['letter']\n return ''.join(sorted(letters))", "def swap(strin, neg_pos):\n key = get_swap()\n strin = strin.lower()\n #checks to see if you are decoding or not\n if neg_pos == '-':\n key = -key\n final = ''\n #the alphabet\n alpha = tuple(string.ascii_lowercase)\n #loop through the message\n for a in strin:\n count = 0\n #sets spaces equal to spaces\n if a == ' ':\n final += ' '\n #keeps characters not in the alphabet in the message\n elif a not in alpha:\n final += a\n for b in alpha:\n #checks to see if character in message matches character in alpha\n if a == b:\n if count + key >= len(alpha):\n #replace with new character\n final += alpha[count + key - len(alpha)]\n else:\n #replace with new character\n final += alpha[count + key]\n count += 1\n return final", "def longswapchar(word: str) -> Iterator[str]:\n\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(word))):\n yield word[:first] + word[second] + word[first+1:second] + word[first] + word[second+1:]", "def _backwards(self, letter):\n\t\tl = letter\n\t\tfor i in range(self.n_rotors):\n\t\t\tl = self._rotor_left2right(self.rotors[i], l, self.offsets[i],\n\t\t\t\t\t\t\t\t\tself.rings[i])\n\t\treturn l", "def swapchar(word: str) -> Iterator[str]:\n\n if len(word) < 2:\n return\n\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i+1] + word[i+1] + word[i+2:]\n\n # try double swaps for short words\n # ahev -> have, owudl -> would\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]", "def convert_letter(list1,str2):\r\n for i in range(len(list1)):\r\n for j in range(len(str2)):\r\n if list1[i][1] == str2[j]:\r\n list1[i][1] = j+1\r\n return list1", "def remove_letter(letter, strng):", "def forward_character():\r\n set_point(point()+1)", "def delete_forward():\r\n point().delete_right_char()", "def edits1(word):\r\n letters = 'abcdefghijklmnopqrstuvwxyz'\r\n splits = [(word[:i], 
word[i:]) for i in range(len(word) + 1)]\r\n deletes = [L + R[1:] for L, R in splits if R]\r\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]\r\n replaces = [L + c + R[1:] for L, R in splits if R for c in letters]\r\n inserts = [L + c + R for L, R in splits for c in letters]\r\n return set(deletes + transposes + replaces + inserts)", "def next_letter(letter, step):\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tnew_letter = get_new_letter(ascii_uppercase, letter, step)\r\n\telif letter in ascii_lowercase:\r\n\t\tnew_letter = get_new_letter(ascii_lowercase, letter, step)\r\n\telse:\r\n\t\tnew_letter = letter\r\n\treturn new_letter", "def convert_edges_perm(edges):\n L = dict(edges)\n output = [START_NODE]\n while output[-1] != END_NODE:\n output.append(L[output[-1]])\n if len(edges) + 1 != len(output):\n raise Exception()\n return output", "def change_directions(tree):\n tmp = [] # holds the nodes that have edges pointing to\n new_tree = []\n for e in tree:\n try:\n if tmp.index(e[1])>=0:\n new_tree.append(e[::-1])\n tmp.append(e[0])\n except ValueError:\n new_tree.append(e)\n tmp.append(e[1])\n return new_tree" ]
[ "0.59650934", "0.5724517", "0.5512467", "0.54730046", "0.5430139", "0.54295605", "0.542761", "0.541962", "0.54040015", "0.5391529", "0.5389645", "0.53580797", "0.53372186", "0.5329606", "0.5303027", "0.5292136", "0.52884614", "0.5262027", "0.52558273", "0.52490795", "0.52348065", "0.52293634", "0.52199817", "0.519496", "0.51908296", "0.516084", "0.5116351", "0.51113206", "0.50983787", "0.50968647" ]
0.66512823
0
Get transition vertexes, with their edge indexes, from the degrees dictionary.
def get_transition_vertexes(dd, start, end):
    # from degrees dictionary choose vertexes with degree == 2
    # except for start and end vertex, combine a list of
    # such vertexes with indexes of their edges in edges list
    v_indexes = {}
    for v in dd:
        if dd[v][0] == 2 and v != start and v != end:
            v_indexes[v] = [dd[v][1], dd[v][2]]
    return v_indexes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vertices(self):\r\n return self.adjacent.keys()", "def _degree_verts(g):\n n = len(g)\n # degs = map(len, g) is a tiny bit slower than the following line\n degs = [ len(g[v]) for v in range(n) ]\n dv = dict()\n for v in range(n):\n degnbr = [0] * n\n for w in g[v]:\n degnbr[degs[w]] += 1\n # Could use defaultdict below, but it does not seem to be faster\n dv.setdefault(tuple(degnbr), []).append(v)\n return dv", "def get_degrees_dictionary(edges):\n dd = {} # degrees dictionary for vertexes\n\n def append_vertex(vertex, edge_index):\n if vertex not in dd.keys():\n dd[vertex] = [1, edge_index]\n else:\n dd[vertex][0] += 1\n dd[vertex].append(edge_index)\n\n e = edges\n for i in range(len(e)):\n append_vertex(e[i][0], i)\n append_vertex(e[i][1], i)\n\n return dd", "def vertices(self):\n return list(self.graph_dict.keys())", "def vertices(self):\r\n return list(self.__graph_dict.keys())", "def vert_degree(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = 0\n\t\tfor x in prob:\n\t\t\tfor vertex in x:\n\t\t\t\tif element == vertex:\n\t\t\t\t\tvertex_map[element] += 1\n\treturn vertex_map", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def obtener_vertices(self):\n return list(self.vertices.keys())", "def out_vertices(self, vertex):\n return self[vertex].keys()", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def list_vertices(self):\n return list(self.graph_dict.keys())", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]", "def regions_from_graph(vertices, edges):\n # step 0 remove filaments (not included in original algorithm)\n nv = np.zeros(len(vertices))\n v = vertices.keys()\n v.sort()\n v2e = {}\n for edge in edges:\n s,e = edge\n nv[v.index(s)] += 1\n nv[v.index(e)] += 1\n v2e[s] = edge\n v2e[e] = edge\n\n filament_nodes = np.nonzero(nv==1)[0]\n filaments = []\n for f in filament_nodes:\n filaments.append(v2e[f])\n edges.remove(v2e[f])\n\n #print filaments\n\n # step 1\n # have a twin for each directed edge\n dedges = edges[:]\n for edge in edges:\n new_edge = edge[1], edge[0]\n if new_edge not in dedges:\n dedges.append( (edge[1],edge[0]) )\n\n # step 2 complement each directed edge with an angle formed with horizontal\n # line passing through edge[0] for each edge\n angles = []\n from math import atan2, degrees\n\n for edge in dedges:\n\n v1 = vertices[edge[0]]\n v2 = vertices[edge[1]]\n dx = v2[0] - v1[0]\n dy = v2[1] - v1[1]\n at = atan2(dy, dx)\n d = degrees(at)\n if d < 0:\n d = 360 + d\n angles.append( [ (edge[0],d), (edge[0],edge[1]) ])\n\n # step 3 sort the list into ascending order using vi and angle as primary and\n # secondary keys\n angles.sort()\n\n\n # form wedges on consecutive entries with same vi (vi,vj,dij), (vi,vk,dik)\n # gives the wedge (vk,vi,vj)\n wedges = []\n start = angles[0]\n c = 0\n for i in range(1,len(angles)):\n next_edge = angles[i]\n previous_edge = angles[i-1]\n if next_edge[0][0] == start[0][0]:\n wedge = [ next_edge[1][1], previous_edge[1][0], previous_edge[1][1] ]\n wedges.append(wedge)\n else:\n # 
first form wedge with last and first entry of current group\n # to do\n wedge = [ start[1][1], previous_edge[1][0], previous_edge[1][1] ]\n wedges.append(wedge)\n start = next_edge\n\n # final pair\n\n wedge = [ start[1][1], previous_edge[1][0], next_edge[1][1] ]\n wedges.append(wedge)\n\n\n # phase two\n # form regions from contiguous wedges\n\n nw = len(wedges)\n used = [0]*nw\n wedges.sort()\n #print wedges\n\n #print 'forming regions'\n\n i = 0\n regions = []\n while sum(used) < nw:\n i = used.index(0)\n wi = wedges[i]\n start = wedges[i]\n used[i] = 1\n region = [start]\n # find next contiguous wedge for wi\n forming = True\n while forming:\n\n\n # find first wedge contiguous to wi\n for j in xrange(nw):\n wj = wedges[j]\n if wj[0] == wi[1] and wj[1] == wi[2]:\n region.append(wj)\n used[j] = 1\n wi = wj\n if wi[1] == start[0] and wi[2] == start[1]:\n forming = False\n regions.append(region)\n #print start, regions\n #raw_input('h')\n break\n\n # put in closed cartographic form\n nodes = []\n for region in regions:\n wedge0 = [ wedge[0] for wedge in region]\n wedge0.append(wedge0[0])\n nodes.append(wedge0)\n\n results = {}\n results['regions'] = nodes\n results['filaments'] = filaments\n\n return results", "def get_vertices(self):\n return self.graph.keys()", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def vertices(self):\n return self.keys()", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def vert_ind_as_val(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = element\n\treturn vertex_map", "def vertices(self):\n s = set([x for x in self.edges.keys()])\n t = set([y for v in self.edges.values() for (y,d) in v.items()])\n v = s.union(t)\n return list(v)", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges", "def vertices(self):\n return self._outgoing.keys()", "def vertices(self):\n return list(self.__graph.values())", "def edges(self):\n return [(k, val) for k, v in self.dict.iteritems() for val in v]", "def compute_in_degrees(digraph):\n # initialize in-degrees dictionary with zero values for all vertices\n in_degree = {}\n for vertex in digraph:\n in_degree[vertex] = 0\n # consider each vertex\n for vertex in digraph:\n # amend in_degree[w] for each outgoing edge from v to w\n for neighbour in digraph[vertex]:\n in_degree[neighbour] += 1\n return in_degree", "def iter_node_map(self):\n return self.d_inv.keys()", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex" ]
[ "0.67272043", "0.6594935", "0.65947855", "0.63876325", "0.6276506", "0.6225013", "0.6201821", "0.6201821", "0.6201821", "0.61925185", "0.61531764", "0.61029667", "0.6101509", "0.6093262", "0.60884774", "0.6087071", "0.6083367", "0.6081893", "0.6076063", "0.6063979", "0.60496074", "0.6035866", "0.6017226", "0.60046256", "0.5988657", "0.5976382", "0.59706545", "0.59700614", "0.5965977", "0.59356725" ]
0.7821683
0
Finds and reduces parallel edges in the list. Sorts the list.
def reduce_parallel(edges):
    edges[:] = qsort(edges)
    logger.debug("Performing parallel optimization\n")
    e = edges  # use shorter name for edges
    l = 0  # l stands for left and points to left side of comparison
    # r stands for right and points to the right side of comparison
    # items are sorted in the list, so parallel
    # edges are going to be next to each other
    for r in range(1, len(e)):
        logger.debug("CMP e[{}]:{} to e[{}]:{}".format(l, e[l], r, e[r]))
        if (e[l][0] == e[r][0] and e[l][1] == e[r][1]):
            # for parallel edges - overwrite left delay with balanced delay
            # except if one of the edges is zero = set zero
            if(e[r][2] == 0):
                e[l][2] = 0
            else:
                e[l][2] = (e[l][2] * e[r][2]) / (e[l][2] + e[r][2])
            # keep right edge value unchanged as
            # it is going to be overwritten later,
            # when not parallel edges is going to be found
            logger.debug("    Parallel: new value e[{}]:{}".format(l, e[l]))
        else:
            # for not equal edges
            # copy right component to the place right after left.
            # e.g.: with l=0, r=1 this is going to be self assignment,
            # with l=3, r=5 this is going to overwrite one of old parallels
            # that were merged with their lefts
            l += 1
            e[l] = e[r]
            logger.debug("    NOT Parallel: e[{}]:{} <= e[{}]:{}".format(
                l, e[l], r, e[r]))
    # by the time this loop is ended l is going to point
    # to last meaningful edge in the list
    kept = l + 1
    removed = len(e) - kept
    logger.debug("\n{} parallels removed\n".format(removed))
    e[:] = e[:kept]
    logger.debug("Parallel reduce result edges: {}\n".format(e))
    return removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sortEdges( self, listEdges ):\r\n changed = True\r\n while changed:\r\n changed = False\r\n for i in range( len(listEdges)-1 ):\r\n if listEdges[ i ].cost > listEdges[ i+1 ].cost:\r\n changed = True\r\n aux = listEdges[ i ]\r\n listEdges[ i ] = listEdges[ i+1 ]\r\n listEdges[ i+1 ] = aux", "def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n remaining_nodes.append(nd)\n return sorted_part, remaining_nodes", "def walk_sort(edges):\n g = nx.Graph()\n g.add_edges_from(edges)\n connected = set()\n degree = nx.degree(g)\n ordering = []\n while degree:\n next = max_degree_node(g, degree, connected)\n if next is not None:\n ordering.append(next)\n else:\n break\n return ordering", "def dp_partition(edges, to_add=[], to_remove=[]):\n if not edges:\n return to_add, [edge_id for edge_id in to_remove if edge_id is not None]\n\n \"\"\" Take the minimum of two results:\n - merge the first two edges, and consider all remaining edges\n - do not merge the first edge, and consider all remaining edges. \"\"\"\n\n \"\"\" Possibility 1: Do not merge the first two edges. \n Result: Partition on all of the remaining edges. Add the current edge to to_add, \n and the current edge to to_remove. \"\"\"\n skip_edge = dp_partition(edges[1:], to_add + [edges[0]], to_remove + [edges[0][2]])\n\n \"\"\" Possibility 2: Merge the first two edges. \n Result: Partition the newly merged edge with all of the remaining edges, we add \n nothing to to_add because the merged edge may be merged again, \n and we remove the two edges which were merged. \"\"\"\n try:\n merge_edge = dp_partition([merge(edges[0], edges[1])] + edges[2:], to_add,\n to_remove + [edges[0][2]] + [edges[1][2]])\n except (AssertionError, IndexError) as exception:\n \"\"\" Either the first two edges in the pool cannot be merged, or there is only one edge remaining\n in the pool. In both cases, partition without merging. \"\"\"\n merge_edge = skip_edge\n\n \"\"\" Return the result which adds the fewest edges. 
\"\"\"\n return min(merge_edge, skip_edge, key=lambda pair: len(pair[0]))", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def task1(graph, n):\r\n alreadyProcessed = set()\r\n B = [j for j in range(1,n+1)]\r\n position = {B[i]:i for i in range(len(B))}\r\n leftNeighbors = {}\r\n parent = {}\r\n \r\n for v in B:\r\n # nodes processed before the current that have an edge in common are left neighbors\r\n leftNeighbors[v] = set(graph._graph[v]) & alreadyProcessed\r\n alreadyProcessed.add(v)\r\n if leftNeighbors[v]:\r\n # the parent is the closest left neighbor \r\n parent[v] = B[max([position[w] for w in leftNeighbors[v]])]\r\n # if this node's neighbors (other then the parent itself) are not a subset of the parent's neighbors \r\n # it means that it's not a lexOrder\r\n if not leftNeighbors[v] - {parent[v]} <= leftNeighbors[parent[v]]:\r\n return []\r\n return B", "def _makeEdges(self):\n self.edges = set()\n\n for i in range(self.size):\n self.edges.add(makePair(self.tour[i - 1], self.tour[i]))", "def get_bridges(edges_list):\n\n # print(\"all edges:\", edges_list)\n\n # make a temporary graph\n temp_G = nx.Graph()\n\n # add all current edges to the graph\n for edge in edges_list:\n edge_node_1, edge_node_2 = edge\n temp_G.add_edge(edge_node_1, edge_node_2)\n\n # get all_bridges in temp graph\n bridges_all = list(nx.bridges(temp_G))\n\n # get set of edges with two traversals left (only want one of each, so use set)\n mult_trav_remaining = set([])\n\n for edge in edges_list:\n\n num_trav_remaining = edges_list.count(edge)\n\n if num_trav_remaining > 1:\n\n mult_trav_remaining.add(edge)\n\n mult_trav_remaining = list(mult_trav_remaining)\n\n # remove mult traversal edges from bridges list\n\n # print(\"bridges_ all:\", bridges_all)\n # print(\"\\nmult_trav_remaining:\", mult_trav_remaining)\n\n # make a new bridges list that contains only edges that don't have mult traversals left\n\n bridges_reduced = []\n\n for edge in bridges_all:\n # print(\"\\n\\nedge:\", edge)\n # print()\n if edge in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} is in {mult_trav_remaining}\")\n elif edge[::-1] in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} REVERSED is in {mult_trav_remaining}\")\n else:\n # print(f\"bridge {edge} is NOT in {mult_trav_remaining}\")\n\n bridges_reduced.append(edge)\n\n # return a list of true bridges\n return bridges_reduced", "def equalize_node_density(self, maximum_distance, maximum_angle_delta, greedy=True):\n print('network: splitting long edges...')\n \"\"\" Split edges which are very long. \"\"\"\n self.split_edges(maximum_distance)\n\n print('network: merging short edges...')\n vertices_to_remove = []\n edges_to_add = []\n \"\"\" Merge edges which are close together, and collect vertices/edges which should be removed/added. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='merging edges')\n new_edges, redundant_vertices = self.merge_edges(self.sections[section_id], maximum_distance,\n maximum_angle_delta, greedy)\n vertices_to_remove.extend(redundant_vertices)\n edges_to_add.extend(new_edges)\n # Maintain the section list\n self.sections[section_id] = list(filter(lambda v: v not in redundant_vertices, self.sections[section_id]))\n\n \"\"\" Add the new edges and edge weights into the graph. 
\"\"\"\n for edge, weight in edges_to_add:\n utils.print_progress(len(edges_to_add), prefix='adding edges')\n new_edge = self.graph.add_edge(edge[0], edge[1], add_missing=False)\n self.edge_weights[new_edge] = weight\n\n \"\"\" Removing vertices reindexes the vertices and edges of the graph. Need to maintain external data \n structures to prevent data corruption. \"\"\"\n original_indices = self.graph.vertex_index.copy() # Property map will correct for reindexing\n self.graph.remove_vertex(vertices_to_remove, fast=True)\n # Vertices have now been reindexed. Update each section with the new vertex IDs.\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='reindexing vertices')\n self.sections[section_id] = [find_vertex(self.graph, original_indices, v)[0] for v in\n self.sections[section_id]]\n\n return self.graph.num_vertices()", "def prims(self, city_list):\r\n start = city_list[0]\r\n for v in city_list:\r\n v.priority = self.distance(v, start)\r\n v.parent = start\r\n mst = defaultdict(list)\r\n for i in range(len(city_list) - 1):\r\n minimum = 9999999999999\r\n minVertex = start\r\n for v in city_list:\r\n if v.priority > 0 and v.priority < minimum:\r\n minimum = v.priority\r\n minVertex = v\r\n minVertex.priority = 0\r\n # add edge to MST\r\n addEdge(mst, minVertex, minVertex.parent)\r\n for v in city_list:\r\n if v.priority > self.distance(v, minVertex):\r\n v.priority = self.distance(v, minVertex)\r\n v.parent = minVertex\r\n\r\n return generate_edges(mst)", "def common_ad_jaccard_pruning(edge_list=\n path+'connected-component-analysis-round2/network-profiling-data/cid6_analysis/cid6-edge-list',\n worker_ads_file = path+'connected-component-analysis-round2/network-profiling-data/cid6_analysis/worker-ads-int-dict.json'):\n G = nx.read_edgelist(edge_list, delimiter='\\t')\n worker_ints = json.load(open(worker_ads_file, 'r'))\n print nx.info(G)\n threshold = 0.0\n count = 0\n forbidden_phones = set()\n # with codecs.open(edge_phone_count, 'r', 'utf-8') as f:\n # for line in f:\n # obj = json.loads(line[0:-1])\n # if int(obj.keys()[0]) >= threshold:\n # forbidden_phones = forbidden_phones.union(set(obj[obj.keys()[0]]))\n # with codecs.open(phone_edge_list, 'r', 'utf-8') as f:\n # for line in f:\n # fields = re.split('\\t', line[0:-1])\n # phones = set(fields[2:])\n # if len(phones.intersection(forbidden_phones)) != 0:\n # count += 1\n # G.remove_edge(fields[0], fields[1])\n H = nx.Graph()\n for e in G.edges:\n if e[0] not in worker_ints or e[1] not in worker_ints:\n raise Exception\n else:\n w1 = set(worker_ints[e[0]])\n w2 = set(worker_ints[e[1]])\n j = len(w1.intersection(w2)) * 1.0 / len(w1.union(w2))\n if j <= threshold:\n H.add_edge(e[0], e[1])\n else:\n count += 1\n print str(count),' edges pruned from graph'\n print nx.info(H)\n ccs = sorted(nx.connected_components(H), key=len, reverse=True)\n print len(ccs)\n print len(ccs[0])", "def posort(l, *cmps):\r\n comes_before = dict((a, set()) for a in l)\r\n comes_after = dict((a, set()) for a in l)\r\n\r\n def add_links(a, b): # b depends on a\r\n comes_after[a].add(b)\r\n comes_after[a].update(comes_after[b])\r\n for c in comes_before[a]:\r\n comes_after[c].update(comes_after[a])\r\n comes_before[b].add(a)\r\n comes_before[b].update(comes_before[a])\r\n for c in comes_after[b]:\r\n comes_before[c].update(comes_before[b])\r\n\r\n def check():\r\n \"\"\" Tests for cycles in manufactured edges \"\"\"\r\n for a in l:\r\n for b in l:\r\n assert not(b in comes_after[a] and a in comes_after[b])\r\n\r\n for cmp 
in cmps:\r\n for a in l:\r\n for b in l:\r\n if cmp(a, b) < 0: # a wants to come before b\r\n # if this wouldn't cause a cycle and isn't already known\r\n if not b in comes_before[a] and not b in comes_after[a]:\r\n add_links(a, b)\r\n # check() # debug code\r\n\r\n return _toposort(comes_after)", "def split_edges(self, maximum_distance):\n \"\"\" Iterate through the vertices of each section. For each vertex v, evaluate edges for which v is a source.\n If an edge of weight greater than maximum_distance, then split it. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='splitting edges')\n current_section = [] # Need to update the section data after splitting the edges.\n for source in self.sections[section_id]:\n current_section.append(source)\n edges_to_remove = [] # If an edge is split, it will need to be removed.\n for edge in self.graph.get_out_edges(source):\n if self.edge_weights[edge] > maximum_distance:\n target = edge[1] # edge is a numpy array of [source, target, edge]. Select target.\n edges_to_remove.append(self.graph.edge(edge[0], edge[\n 1])) # If an edge is split, the original edge should be removed.\n\n new_edge_count = int(math.ceil(self.edge_weights[edge] / maximum_distance))\n new_edge_distance = self.edge_weights[edge] / new_edge_count\n current_point = shapes.Point.from_list(\n list(self.node_locations[source]) + [self.node_heading[target]])\n previous_vertex = source\n for _ in range(new_edge_count):\n current_point = utils.offset_point(current_point, new_edge_distance, current_point.bearing)\n current_vertex = self.graph.add_vertex()\n current_section.append(current_vertex) # The new vertex becomes a part of the section.\n \"\"\" Populate the property map for the new vertex. Inherit values from the target node,\n unless the target node is a junction node. Then inherit values from the source. \"\"\"\n self.node_locations[current_vertex] = current_point.as_list()\n self.node_heading[current_vertex] = current_point.bearing\n property_vertex = source if not self.junctions[target] else target\n self.node_speed_limit[current_vertex] = self.node_speed_limit[property_vertex]\n self.node_width[current_vertex] = self.node_width[property_vertex]\n self.node_id[current_vertex] = self.node_id[property_vertex]\n\n \"\"\" Create an edge between the previous vertex and the newly created vertex, \n and update the edge weight property map. \"\"\"\n current_edge = self.graph.add_edge(previous_vertex, current_vertex)\n self.edge_weights[current_edge] = new_edge_distance\n\n # The current vertex becomes the previous vertex in the next step.\n previous_vertex = current_vertex\n\n \"\"\" Create an edge between the last new vertex that was created and the target of the\n original edge which is being split, and update the property map. 
\"\"\"\n self.edge_weights[self.graph.add_edge(previous_vertex, target)] = new_edge_distance\n list(map(self.graph.remove_edge, edges_to_remove)) # Remove all relevant edges\n self.sections[section_id] = current_section # Update the section with the new vertices", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')", "def connect_persons(persons, character_list):\n edge_list = []\n persons_fixed = []\n for i in range(len(persons)):\n for j in range(len(character_list)):\n if(persons[i] in character_list[j][0]):\n persons_fixed.append(sorted(character_list[j][0], key=len)[-1])\n \n for i in range(1, len(persons_fixed)):\n edge_list.append((persons_fixed[0], persons_fixed[1]))\n \n return(edge_list)", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order", "def cyclic_sort_vertices_2d(Vlist):\n if len(Vlist)==0: return Vlist\n\n adjacency_matrix = Vlist[0].polyhedron().vertex_adjacency_matrix()\n result = [ Vlist.pop() ]\n while len(Vlist)>0:\n for i in range(len(Vlist)):\n if adjacency_matrix[Vlist[i].index(), result[-1].index()] == 1:\n result.append( Vlist.pop(i) )\n break;\n else:\n raise ValueError\n return result", "def main(edges=[(0, 1, 3), (1, 3, 4), (2, 3, 3), (0, 2, 2) ], num=4):\n\n # initialize routers array\n routers = []\n for x in range(num):\n routers.append([1000] * num)\n routers[x][x] = 0\n \n # set distance to all neighbours \n for edge in edges:\n routers[edge[0]][edge[1]] = edge[2]\n routers[edge[1]][edge[0]] = edge[2]\n\n start_table = routers.copy()\n\n flag = True\n while flag:\n upflag = False\n for nbrs in edges:\n routers[nbrs[0]], up_flag1 = update_table(routers[nbrs[0]], routers[nbrs[1]], dist=nbrs[2])\n routers[nbrs[1]], up_flag2 = update_table(routers[nbrs[1]], routers[nbrs[0]], dist=nbrs[2])\n upflag = upflag or up_flag1 or up_flag2\n\n flag = upflag\n\n return start_table, routers", "def eliminationFeuilles(self,edges,vertices):\n 
dictAdjacenceACPM = {n : set() for n in vertices}\n for edge in edges:\n s,t = self.getIdVerticesOfEdge(edge)\n if not(s in self.setTerminals) :\n dictAdjacenceACPM[s].add(t)\n if not(t in self.setTerminals):\n dictAdjacenceACPM[t].add(s)\n vertices_degree_one = { n for n in vertices if len(dictAdjacenceACPM[n])==1}\n\n dictSteinerNodeDelete = {n : 1 for n in (set(vertices) - vertices_degree_one) }\n return dictSteinerNodeDelete", "def convert_edges_perm(edges):\n L = dict(edges)\n output = [START_NODE]\n while output[-1] != END_NODE:\n output.append(L[output[-1]])\n if len(edges) + 1 != len(output):\n raise Exception()\n return output", "def edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n edges.append([vertices[k], vertices[k + 1]])\n return edges", "def sorting(self, presorted=None):\n self._sorted_nodes = []\n if presorted:\n notsorted_nodes = copy(presorted)\n else:\n notsorted_nodes = copy(self.nodes)\n predecessors = {key: copy(val) for (key, val) in self.predecessors.items()}\n\n # nodes that depends only on the self._nodes_wip should go first\n # soe remove them from the connections\n for nd_out in self._node_wip:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)\n\n while notsorted_nodes:\n sorted_part, notsorted_nodes = self._sorting(notsorted_nodes, predecessors)\n self._sorted_nodes += sorted_part\n for nd_out in sorted_part:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)", "def resort_couplings(J,sortIx):\n return", "def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj", "def _toposort(edges):\r\n incoming_edges = reverse_dict(edges)\r\n incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())\r\n S = set((v for v in edges if v not in incoming_edges))\r\n L = []\r\n\r\n while S:\r\n n = S.pop()\r\n L.append(n)\r\n for m in edges.get(n, ()):\r\n assert n in incoming_edges[m]\r\n incoming_edges[m].remove(n)\r\n if not incoming_edges[m]:\r\n S.add(m)\r\n if any(incoming_edges.get(v, None) for v in edges):\r\n raise ValueError(\"Input has cycles\")\r\n return L", "def join_edge_group(edges, orientation, tolerance=DEFAULT_JOIN_TOLERANCE):\n if orientation == \"h\":\n min_prop, max_prop = \"x0\", \"x1\"\n elif orientation == \"v\":\n min_prop, max_prop = \"top\", \"bottom\"\n else:\n raise ValueError(\"Orientation must be 'v' or 'h'\")\n\n sorted_edges = list(sorted(edges, key=itemgetter(min_prop)))\n joined = [sorted_edges[0]]\n for e in sorted_edges[1:]:\n last = joined[-1]\n if e[min_prop] <= (last[max_prop] + tolerance):\n if e[max_prop] > last[max_prop]:\n # Extend current edge to new extremity\n joined[-1] = resize_object(last, max_prop, e[max_prop])\n else:\n # Edge is separate from previous edges\n joined.append(e)\n\n return joined", "def find_topo_sort(node_list):\r\n visited = set()\r\n topo_order = []\r\n #print(node_list)\r\n for node in node_list:\r\n topo_sort_dfs(node, visited, topo_order)\r\n return topo_order", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i 
+ 1, j + 1):\n self.exchange(i, j)\n improve = True", "def filter_graph(self, sorted_node, ploidy):\n \n for node in sorted_node:\n \n # while number of prefix edge > ploidy level\n while len(self.prefix[node]) > ploidy:\n min_weight_node = min(self.prefix[node], key=self.prefix[node].get)\n self.remove_edge(min_weight_node, node)\n \n # while number of suffix edge > ploidy level\n while len(self.suffix[node]) > ploidy:\n min_weight_node = min(self.suffix[node], key=self.suffix[node].get)\n self.remove_edge(node, min_weight_node)\n \n print(\"Graph is reduced to best overlap graph.\")" ]
[ "0.65361464", "0.59675485", "0.5884665", "0.56545603", "0.560666", "0.5599113", "0.5576444", "0.5544006", "0.55268884", "0.5505579", "0.546652", "0.54547846", "0.5450952", "0.544428", "0.5394301", "0.53624326", "0.5361598", "0.5353389", "0.53372085", "0.53365934", "0.5315151", "0.5311894", "0.53092575", "0.53037244", "0.5293969", "0.52881527", "0.52842176", "0.52830756", "0.5281424", "0.52649987" ]
0.6688602
0
Reduces transitional vertexes in graph described in edges
def reduce_sequential(edges, start, end):
    dd = get_degrees_dictionary(edges)  # O(len(edges))
    tvs = get_transition_vertexes(dd, start, end)  # O(len(dd))

    logger.debug("dd: {}".format(dd))
    logger.debug("tvs: {}".format(tvs))

    for v in tvs:
        # for each vertex in transitional vertexes
        # edges
        ei1 = tvs[v][0]
        ei2 = tvs[v][1]
        e1 = edges[ei1]  # e1 will hold the resulting merged edge
        e2 = edges[ei2]  # e2 becomes a self-loop and is removed later

        # vertexes
        # v - vertex to be removed
        # v1 - vertex connected to v by the e1 edge (unchanged)
        # v2 - vertex connected to v by the e2 edge,
        #      will be moved into e1, substituting v there
        # the edge indexes in the transitional vertex dictionary will be updated

        logger.debug("Substituted {}: {}:{}, {}:{} -> ".format(
            v, ei1, e1, ei2, e2))

        # v is going to be substituted in e1 by the "not v" vertex of e2
        substitute_index_in_ei2 = 1 - e2.index(v)  # index of v 0 -> 1; 1 -> 0

        # replace v in ei1 by the substitute from ei2
        v2 = e2[substitute_index_in_ei2]
        e1[e1.index(v)] = v2
        e2[substitute_index_in_ei2] = v

        # here we will have 2 edges
        # edges[ei1] -> ['v1', 'v2', ?]
        # edges[ei2] -> ['v', 'v', 5]  # delay not changed

        # update the edge index stored for v2 in the tvs dict to point to
        # ei1 instead of ei2,
        # e.g. 'v2' was connected by ei2, now it is connected by ei1
        if v2 in tvs:
            # start, end and non-transitional vertexes are not present in tvs
            # and don't need to be updated
            v2ei = tvs[v2]  # list of edge indexes for v2
            v2ei[v2ei.index(ei2)] = ei1
            logger.debug("tvs[{}] now points to edge {}: {}".format(
                v2, ei1, tvs[v2]))

        # update weight
        new_weight = e1[2] + e2[2]
        e1[2] = new_weight

        # normalize result edge
        redirect_edge_alpabetically(e1)

        # here we will have 2 edges
        # edges[ei1] -> ['v1', 'v2', 8]
        # edges[ei2] -> ['v', 'v', 5]  # delay not changed
        # the only thing left is to remove the ei2 edge; this is done later
        # so as not to break iteration over edges

        logger.debug("{}:{}, {}:{}".format(ei1, e1, ei2, e2))

    # get indexes of edges to be removed, highest first
    indexes = sorted((tvs[v][1] for v in tvs), reverse=True)
    logger.debug("Edges index removed after sequential update: {}".format(
        indexes))
    for i in indexes:
        edges.pop(i)

    return len(tvs)  # number of edges removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_edges(self):", "def contract_edge(graph, u, v):\n graph[v] = (graph[v] | graph[u]) - {u, v}\n del graph[u]\n for w in graph:\n if u in graph[w]:\n graph[w] = (graph[w] | {v}) - {u, w}", "def mutateEdge(g, edges, directed, connected):\n if ((directed and g.e == g.n ** 2 - g.n)\n or (not directed and g.e == (g.n ** 2 - g.n) / 2)): # Complete graph\n return\n\n if (g.e > edges):\n while g.e != edges:\n removeEdge(g, directed)\n g.e -= 1\n elif (g.e < edges):\n while g.e != edges:\n addEdge(g, directed, connected)\n g.e += 1\n else: # Edge count is correct, just do an edge swap for the mutation\n removeEdge(g, directed)\n addEdge(g, directed, connected)", "def eliminationFeuilles(self,edges,vertices):\n dictAdjacenceACPM = {n : set() for n in vertices}\n for edge in edges:\n s,t = self.getIdVerticesOfEdge(edge)\n if not(s in self.setTerminals) :\n dictAdjacenceACPM[s].add(t)\n if not(t in self.setTerminals):\n dictAdjacenceACPM[t].add(s)\n vertices_degree_one = { n for n in vertices if len(dictAdjacenceACPM[n])==1}\n\n dictSteinerNodeDelete = {n : 1 for n in (set(vertices) - vertices_degree_one) }\n return dictSteinerNodeDelete", "def __filterEdges(self):", "def convert_edges_perm(edges):\n L = dict(edges)\n output = [START_NODE]\n while output[-1] != END_NODE:\n output.append(L[output[-1]])\n if len(edges) + 1 != len(output):\n raise Exception()\n return output", "def edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n edges.append([vertices[k], vertices[k + 1]])\n return edges", "def evert(self):\n for e in self.edges:\n self.invert()\n for f in self.faces:\n f.invert()", "def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)", "def add_all_edges(self):\n for n1 in self.vertices():\n for n2 in self.vertices():\n if n1 != n2:\n self.add_edge((n1, n2))", "def edge_mapping(self):\n ...", "def reduce_edges(edge_index):\n new_edge_index = {}\n for edge in edge_index.T:\n if edge[0] > edge[1]:\n edge = torch.flip(edge, [-1])\n edge = edge.detach().numpy()\n name = str(edge[0]) + str(edge[1])\n if name not in new_edge_index.keys():\n new_edge_index[name] = edge\n\n return torch.tensor(list(new_edge_index.values())).T", "def de2ue(edges):\n return set(de for de in edges if tuple(reversed(de)) in edges)", "def remove_inconsistent_edges(graph: BELGraph) -> None:\n for u, v in get_inconsistent_edges(graph):\n edges = [(u, v, k) for k in graph[u][v]]\n graph.remove_edges_from(edges)", "def remove_edges(self, node: NodeKey) -> Edge:", "def _inverse_edges(edges: np.array) -> np.array:\n inversed_edges = edges.copy()\n inversed_edges[:, [0, 1]] = inversed_edges[:, [1, 0]]\n return inversed_edges", "def CopyReplaceVertices(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_CopyReplaceVertices(self, *args)", "def regions_from_graph(vertices, edges):\n # step 0 remove filaments (not included in original algorithm)\n nv = np.zeros(len(vertices))\n v = vertices.keys()\n v.sort()\n v2e = {}\n for edge in edges:\n s,e = edge\n nv[v.index(s)] += 1\n nv[v.index(e)] += 1\n v2e[s] = edge\n v2e[e] = edge\n\n filament_nodes = np.nonzero(nv==1)[0]\n filaments = []\n for f in filament_nodes:\n filaments.append(v2e[f])\n edges.remove(v2e[f])\n\n #print filaments\n\n # step 1\n # have a twin for each directed edge\n dedges = edges[:]\n for edge in 
edges:\n new_edge = edge[1], edge[0]\n if new_edge not in dedges:\n dedges.append( (edge[1],edge[0]) )\n\n # step 2 complement each directed edge with an angle formed with horizontal\n # line passing through edge[0] for each edge\n angles = []\n from math import atan2, degrees\n\n for edge in dedges:\n\n v1 = vertices[edge[0]]\n v2 = vertices[edge[1]]\n dx = v2[0] - v1[0]\n dy = v2[1] - v1[1]\n at = atan2(dy, dx)\n d = degrees(at)\n if d < 0:\n d = 360 + d\n angles.append( [ (edge[0],d), (edge[0],edge[1]) ])\n\n # step 3 sort the list into ascending order using vi and angle as primary and\n # secondary keys\n angles.sort()\n\n\n # form wedges on consecutive entries with same vi (vi,vj,dij), (vi,vk,dik)\n # gives the wedge (vk,vi,vj)\n wedges = []\n start = angles[0]\n c = 0\n for i in range(1,len(angles)):\n next_edge = angles[i]\n previous_edge = angles[i-1]\n if next_edge[0][0] == start[0][0]:\n wedge = [ next_edge[1][1], previous_edge[1][0], previous_edge[1][1] ]\n wedges.append(wedge)\n else:\n # first form wedge with last and first entry of current group\n # to do\n wedge = [ start[1][1], previous_edge[1][0], previous_edge[1][1] ]\n wedges.append(wedge)\n start = next_edge\n\n # final pair\n\n wedge = [ start[1][1], previous_edge[1][0], next_edge[1][1] ]\n wedges.append(wedge)\n\n\n # phase two\n # form regions from contiguous wedges\n\n nw = len(wedges)\n used = [0]*nw\n wedges.sort()\n #print wedges\n\n #print 'forming regions'\n\n i = 0\n regions = []\n while sum(used) < nw:\n i = used.index(0)\n wi = wedges[i]\n start = wedges[i]\n used[i] = 1\n region = [start]\n # find next contiguous wedge for wi\n forming = True\n while forming:\n\n\n # find first wedge contiguous to wi\n for j in xrange(nw):\n wj = wedges[j]\n if wj[0] == wi[1] and wj[1] == wi[2]:\n region.append(wj)\n used[j] = 1\n wi = wj\n if wi[1] == start[0] and wi[2] == start[1]:\n forming = False\n regions.append(region)\n #print start, regions\n #raw_input('h')\n break\n\n # put in closed cartographic form\n nodes = []\n for region in regions:\n wedge0 = [ wedge[0] for wedge in region]\n wedge0.append(wedge0[0])\n nodes.append(wedge0)\n\n results = {}\n results['regions'] = nodes\n results['filaments'] = filaments\n\n return results", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]", "def to_edges(graph):\n return list(zip(graph[:-1], graph[1:]))", "def invert_arrowheads(graph):\n for edge in graph.edges():\n edge.attr['arrowhead'] = 'inv'", "def make_auxiliar_network(edges):\n\n s = S;t = T;na = {}\n fl = deque([s]) # fathers layer\n cl = deque([]) # childrens layer\n level = 0\n vl = {s:level} # vertex : level\n while len(fl) > 0:\n key = fl.popleft()\n for e in edges:\n if e['used']: continue\n \n r = e['capacity'] - e['flow']\n if key == e['first'] and r > 0:\n v = {'id':e['last'], 'direction':'F'}\n elif key == e['last'] and e['flow'] > 0:\n v = {'id':e['first'], 'direction':'B'}\n else: continue\n \n if v['id'] not in cl: \n cl.append(v['id'])\n if key not in na:\n na[key] = []\n\n if v['id'] not in vl:\n vl[v['id']] = level\n if vl[v['id']] == level:\n na[key].append(v)\n e['used'] = True\n\n \n if len(fl) == 0: fl = cl; cl = deque([]); level += 1\n if len(fl) == 0 or t in fl: break\n\n complete = False\n # remove all vertex except t from last layer\n for n in [k for k in vl if vl[k] 
== level-1]: \n if n == t:\n complete = True\n continue\n for k in na:\n na[k] = [v for v in na[k] if v['id'] != n]\n\n \n return {'na':na, 'complete':complete}", "def edge_apply(op, edge):\n vs = frozenset(op[v] for v in EDGES[edge])\n return EDGES_BY_VERTSET[vs]", "def add_edges(self):\n for u in self.G.nodes():\n for v in self.G.nodes():\n if u != v and u != \"Sink\" and v != \"Source\":\n self.G.add_edge(\n u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n )", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def _get_edges_to_mapped_vertices(graph, vertex_id):\n subgraph_edges_to_mapped_vertices = []\n for e in graph.get_edges(vertex_id):\n t_neighbor = graph.get_vertex(e.other_vertex(vertex_id))\n if not t_neighbor:\n raise VitrageAlgorithmError('Cant get vertex for edge %s' % e)\n if t_neighbor and t_neighbor.get(MAPPED_V_ID):\n subgraph_edges_to_mapped_vertices.append(e)\n return set(subgraph_edges_to_mapped_vertices)", "def transform(self, ugraph):\n return self._transform(ugraph)", "def remove_edges(g, edgelist):\n for edge in edgelist:\n (u, v) = tuple(edge)\n g[u].remove(v)\n g[v].remove(u)" ]
[ "0.6852421", "0.6568463", "0.64864695", "0.6437077", "0.64132136", "0.6352795", "0.63091815", "0.6269146", "0.6220492", "0.6137458", "0.6101099", "0.6094812", "0.60881406", "0.5988933", "0.5956525", "0.59382725", "0.5912368", "0.58929366", "0.58926547", "0.5890905", "0.58779806", "0.5871682", "0.586371", "0.5863346", "0.585814", "0.5839555", "0.58341557", "0.5822942", "0.57883984", "0.5783264" ]
0.6906566
0
Scans edges list and calculates degree for each vertex, also saving indexes of related edges for each vertex
def get_degrees_dictionary(edges):
    dd = {}  # degrees dictionary for vertexes

    def append_vertex(vertex, edge_index):
        if vertex not in dd:
            dd[vertex] = [1, edge_index]
        else:
            dd[vertex][0] += 1
            dd[vertex].append(edge_index)

    for i, edge in enumerate(edges):
        append_vertex(edge[0], i)
        append_vertex(edge[1], i)
    return dd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_degree_centrality(self, vertices, edges):\n # here we are calculating our own deg cen res on the fly\n # edge counts will store the number of edges associated with\n # each vertex\n edge_counts = {}\n\n # get the edge frame in pandas form and iterate\n edge_pandas = edges.to_pandas()\n for (index, row) in edge_pandas.iterrows():\n # extract src and dest node index\n src = int(row[\"src\"])\n dest = int(row[\"dst\"])\n # now we increment the count for that node\n # in edge_counts, or initialize it to one\n # if it doesn't exist\n if src not in edge_counts.keys():\n edge_counts[src] = 1\n else:\n edge_counts[src] = edge_counts[src] + 1\n if dest not in edge_counts.values():\n edge_counts[dest] = 1\n else:\n edge_counts[dest] = edge_counts[dest] + 1\n return edge_counts", "def calc_degree(graph_rdd):\n all_degree = graph_rdd \\\n .map(swap) \\\n .union(graph_rdd) \\\n .map(lambda (x, y): (x, 1)) \\\n .reduceByKey(add, numPartitions=40)\n return all_degree", "def _compute_node_degrees(self):\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append(matrix)\n res = parallel_process(array=args, function=mt.calculate_degrees, n_jobs=self.n_jobs, front_num=0)\n for metaedge, (out_degree, in_degree) in zip(mes, res):\n self.out_degree[metaedge] = out_degree\n self.in_degree[metaedge] = in_degree", "def vert_degree(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = 0\n\t\tfor x in prob:\n\t\t\tfor vertex in x:\n\t\t\t\tif element == vertex:\n\t\t\t\t\tvertex_map[element] += 1\n\treturn vertex_map", "def degree(adj_mat, vertex):\n return np.sum(adj_mat[vertex][:])", "def compute_in_degrees(digraph):\n # initialize in-degrees dictionary with zero values for all vertices\n in_degree = {}\n for vertex in digraph:\n in_degree[vertex] = 0\n # consider each vertex\n for vertex in digraph:\n # amend in_degree[w] for each outgoing edge from v to w\n for neighbour in digraph[vertex]:\n in_degree[neighbour] += 1\n return in_degree", "def edge_list_build(input_path, output_path):\n\n start_time = time.time()\n\n df = pd.read_csv(input_path, sep='\\t', header=None)\n\n for col in range(1, len(df.columns)):\n df.iloc[:, col] = df.iloc[:, col-1] + '_' + df.iloc[:, col]\n\n n_divs = len(df.columns) - 1\n\n\n dict_node_names = {}\n\n for id, node_name in enumerate(np.unique(df.values.flatten())):\n dict_node_names[node_name] = id + 1\n\n tmp_df = pd.DataFrame.from_dict(dict_node_names, orient='index')\n tmp_df.reset_index(inplace=True)\n tmp_df.rename({'index': 'nodes', 0: 'hash'}, inplace=True, axis=1)\n\n hash_df = tmp_df['nodes'].str.split('_', n=n_divs, expand=True)\n hash_df = pd.concat([hash_df, tmp_df['hash']], axis=1)\n\n for col_name in df.columns:\n df[col_name] = df[col_name].map(dict_node_names)\n\n df['root'] = 0\n colnames = df.columns.values\n colnames = list(colnames[-1:]) + list(colnames[:-1])\n df = df[colnames]\n\n df_tuples = pd.DataFrame()\n\n for i in range(len(df.columns) - 1):\n df_tuples[i] = list(df[df.columns[i:i + 2]].itertuples(index=False, name=None))\n del df\n gc.collect()\n\n nodes_list = []\n\n for col_id in range(0, df_tuples.shape[1]):\n father_child = df_tuples.iloc[:, col_id].drop_duplicates().values\n nodes_list.extend(father_child)\n\n graph = nx.DiGraph(nodes_list)\n graph_bfs = nx.bfs_tree(graph, 0)\n \n path = output_path + '.hashmap'\n hash_df.to_csv(path, index=False, sep='\\t')\n end_time = time.time()\n print(\"Time spent creating tree from csv file:\", 
end_time - start_time)\n return graph_bfs", "def get_edges(self):\n for i in self.gens:\n if self.active[i]:\n elist = set()\n H = (i,) # edge-stabilizing subgroup\n reps = set(self.word_generator(parabolic=H))\n reps = self.G.sort_words(reps)\n for word in reps:\n v1 = self.G.move(self.vtable, 0, word)\n v2 = self.G.move(self.vtable, 0, word + (i,))\n if v1 is not None and v2 is not None:\n if v1 > v2:\n v1, v2 = v2, v1\n if (v1, v2) not in elist:\n elist.add((v1, v2))\n\n self.edge_indices[i] = elist\n\n self.num_edges = sum(len(L) for L in self.edge_indices.values())", "def compute_in_degrees (digraph) :\n in_degree = dict()\n\n # initialize the in-degree of each node with 0s\n for key in digraph :\n in_degree[key] = 0\n\n for node in digraph :\n for head_node in digraph[node] :\n in_degree[head_node]+=1\n\n return in_degree", "def get_transition_vertexes(dd, start, end):\n # from degrees dictionary choose vertexes with degree == 2\n # except for start and end vertex, combine a list of\n # such vertexes with indexes of their edges in edges list\n\n v_indexes = {}\n for v in dd:\n if dd[v][0] == 2 and v != start and v != end:\n v_indexes[v] = [dd[v][1], dd[v][2]]\n\n return v_indexes", "def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities", "def getDegreeDistribution(self):\n degreeDistribution = {}\n degreeCount = self.__degreeCount\n vertexNumbers = self.vertexIndex.keys()\n \n for vertexNumber in vertexNumbers:\n try:\n numberOfNeighbors = degreeCount[vertexNumber]\n except KeyError:\n numberOfNeighbors = 0\n\n\n \n try:\n degreeDistribution[numberOfNeighbors] += 1\n except KeyError:\n degreeDistribution[numberOfNeighbors] = 1\n return degreeDistribution", "def _degree_verts(g):\n n = len(g)\n # degs = map(len, g) is a tiny bit slower than the following line\n degs = [ len(g[v]) for v in range(n) ]\n dv = dict()\n for v in range(n):\n degnbr = [0] * n\n for w in g[v]:\n degnbr[degs[w]] += 1\n # Could use defaultdict below, but it does not seem to be faster\n dv.setdefault(tuple(degnbr), []).append(v)\n return dv", "def get_adj_and_degrees(num_nodes, triplets):\n adj_list = [[] for _ in range(num_nodes)]\n for i, triplet in enumerate(triplets):\n adj_list[triplet[0]].append([i, triplet[2]])\n adj_list[triplet[2]].append([i, triplet[0]])\n\n degrees = np.array([len(a) for a in adj_list])\n adj_list = [np.array(a) for a in adj_list]\n return adj_list, degrees", "def compute_degrees(self, graph):\n\n g_vertices = graph.vertices\n g_edges = graph.edges\n\n # Get unweighted degrees\n indeg = graph.inDegrees\n outdeg = graph.outDegrees\n\n # Get weighted degrees\n w_indeg = (g_edges.groupby(\"dst\").agg(sum(\"weight\").alias(\"w_inDegree\"))).selectExpr(\"dst as id\",\n \"w_inDegree as w_inDegree\")\n w_outdeg = (g_edges.groupby(\"src\").agg(sum(\"weight\").alias(\"w_outDegree\"))).selectExpr(\"src as id\",\n \"w_outDegree as w_outDegree\")\n # Update vertices attribute\n new_v = g_vertices.join(indeg, \"id\", \"left_outer\")\n new_v = new_v.join(outdeg, \"id\", \"left_outer\")\n new_v = new_v.join(w_indeg, \"id\", \"left_outer\")\n 
new_v = new_v.join(w_outdeg, \"id\", \"left_outer\")\n new_v = new_v.na.fill(0)\n\n # Update graph\n self.graph = GraphFrame(new_v, g_edges)", "def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if(self.__directed):\r\n degrees = {}\r\n l = list(self.__graph_dict.values())\r\n flatter = []\r\n for x in l:\r\n for y in x:\r\n flatter.append(y)\r\n\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n if(k in flatter):\r\n degrees[k] += flatter.count(k)\r\n return degrees\r\n\r\n else:\r\n degrees = {}\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n return degrees", "def adjacencyLists():\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)", "def adjacencyLists():\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)", "def in_degree_distribution(digraph):\n\tdist_in_degree = {}\n\tzero_in_count = 0\n\t\n\t# Returns:\n\t# { key, i.e., in-degree, number of edges coming into a node: \n\t# value, i.e., int, number of nodes with this value for in-degree }\n\n\t# first, create a temporary 2d list, each interior list containing (1) a key or in-degree and (2) a value or number of nodes with this corresponding in-degree", "def edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n edges.append([vertices[k], vertices[k + 1]])\n return edges", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... 
every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def eligible_edges_with_indexes(self):\n return list(map(lambda e: (self.edges.index(e), e), self.eligible_edges))", "def in_degree_distribution(graph):\n in_degrees = collections.Counter()\n for node in graph.nodes(data=True):\n in_degrees[graph.in_degree(node[0])] += 1\n\n in_degrees = sorted(in_degrees.items(), key=lambda x: x[0])\n\n print(in_degrees)", "def in_degree_distribution (digraph) :\n\n in_degree_dist = dict ()\n in_degrees = compute_in_degrees (digraph)\n\n for node in in_degrees :\n if in_degrees[node] in in_degree_dist :\n in_degree_dist[in_degrees[node]] += 1\n else :\n in_degree_dist[in_degrees[node]] = 1\n\n return in_degree_dist", "def connected_components(vertices, edges):\n\n l = dict()\n\n for v in vertices:\n l[v] = ds.make_set(v)\n\n for edge in edges:\n if ds.find_set(l[edge[0]]) != ds.find_set(l[edge[1]]):\n ds.union(l[edge[0]], l[edge[1]])\n\n roots = [l[v] for v in vertices if l[v].parent == l[v]]\n cc = list()\n\n for root in roots:\n cc.append(list(map(lambda x: x.data, root.descendants())))\n\n return cc", "def path_to_edges(self):\n\n edges = [0 for i in range(self.graph.num_edges)]\n\n for row in range(self.graph.rows):\n for col in range(self.graph.cols):\n if self.path[row][col]:\n if row + col < self.graph.cols - 1:\n if col < self.graph.cols - 1 and self.path[row][col + 1]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.graph.diags[row + col] + 2 * row\n edges[edge_number] = 1\n if row < self.graph.rows - 1 and self.path[row + 1][col]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.graph.diags[row + col] + 1 + 2 * row\n edges[edge_number] = 1\n else:\n col_dist = self.graph.cols - col - 1\n if col < self.graph.cols - 1 and self.path[row][col + 1]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.graph.diags[row + col] + 2 * col_dist - 1\n edges[edge_number] = 1\n if row < self.graph.rows - 1 and self.path[row + 1][col]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.graph.diags[row + col] + 2 * col_dist\n edges[edge_number] = 1\n \n\n return edges", "def in_degree_distribution(digraph):\n # find in_degrees\n in_degree = compute_in_degrees(digraph)\n # initialize dictionary for degree distribution\n degree_distribution = {}\n # consider each vertex\n for vertex in in_degree:\n # update degree_distribution\n if in_degree[vertex] in degree_distribution:\n degree_distribution[in_degree[vertex]] += 1\n else:\n degree_distribution[in_degree[vertex]] = 1\n return degree_distribution", "def _initializeAdjacencyList(self):\n\n if self.comm.rank == 0:\n # First, create a dictionary of common edges shared by components\n edgeToFace = {}\n for elemID in self.bdfInfo.elements:\n elemInfo = self.bdfInfo.elements[elemID]\n elemConn = elemInfo.nodes\n compID = self.meshLoader.nastranToTACSCompIDDict[elemInfo.pid]\n nnodes = len(elemConn)\n if nnodes >= 2:\n for j in range(nnodes):\n nodeID1 = elemConn[j]\n nodeID2 = elemConn[(j + 1) % nnodes]\n\n if nodeID1 < nodeID2:\n key = (nodeID1, nodeID2)\n else:\n key = (nodeID2, nodeID1)\n\n if 
key not in edgeToFace:\n edgeToFace[key] = [compID]\n elif compID not in edgeToFace[key]:\n edgeToFace[key].append(compID)\n\n # Now we loop back over each element and each edge. By\n # using the edgeToFace dictionary, we can now determine\n # which components IDs (jComp) are connected to the\n # current component ID (iComp).\n self.adjacentComps = []\n\n for edgeKey in edgeToFace:\n if len(edgeToFace[edgeKey]) >= 2:\n for i, iComp in enumerate(edgeToFace[edgeKey][:-1]):\n for jComp in edgeToFace[edgeKey][i + 1 :]:\n if iComp < jComp:\n dvKey = (iComp, jComp)\n else:\n dvKey = (jComp, iComp)\n if dvKey not in self.adjacentComps:\n self.adjacentComps.append(dvKey)\n\n else:\n self.adjacentComps = None\n\n # Wait for root\n self.comm.barrier()", "def add_vertices(self, vertices: Iterable[\"Vertex\"]) -> Sequence[int]:\n indices = []\n for vertex in vertices:\n key = self.key(vertex)\n try:\n indices.append(self.ledger[key])\n except KeyError:\n index = len(self.vertices)\n self.vertices.append(vertex)\n self.ledger[key] = index\n indices.append(index)\n return tuple(indices)", "def compute_in_degrees(digraph):\n num_degree = {}\n for dummy_node in digraph:\n num_degree[dummy_node] = 0\n for key in digraph:\n for node in digraph[key]:\n num_degree[node] += 1\n return num_degree" ]
[ "0.66839856", "0.62963444", "0.6290213", "0.6239078", "0.60592306", "0.5999901", "0.5971899", "0.5934545", "0.5895978", "0.58021456", "0.5762711", "0.5754184", "0.57531744", "0.57054716", "0.56767124", "0.5667014", "0.56646866", "0.56646866", "0.5633163", "0.5631301", "0.56272477", "0.5623497", "0.56152356", "0.5599932", "0.55882716", "0.5581957", "0.55640316", "0.5562826", "0.5552608", "0.55456996" ]
0.6403898
1
scans edges data from user input
def scan_edges(edges_count):
    edges = []
    for _ in range(edges_count):
        edge = input("Enter edge:").split(" ")
        try:
            edge[2] = int(edge[2])
        except (IndexError, ValueError):
            # fewer than three tokens or a non-numeric weight
            raise ValueError("Input data parsing error, "
                             "the format should be like \"s s 3\"")
        edges.append(edge)
    return edges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_input(E):\n # ---------- INSERT CODE BELOW ----------\n edge_list = []\n\n for _ in range(E):\n src, dst, cost = input('').rstrip('\\r\\n').split()\n edge_list.append((int(src),int(dst),int(cost)))\n \n return edge_list\n # ---------- INSERT CODE ABOVE ----------", "def iterate_inbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_inbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Inbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(v, vertex)\n print('Edge from ' + str(v) + ' to ' + str(vertex) + ' with cost ' + str(cost))", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)", "def __filterEdges(self):", "def run(version=1):\n\n # scan header to define our graph parameters\n try:\n header = input(\"Enter graph header:\")\n edges_count, start_edge, finish_edge = header.split(\" \")\n edges_count = int(edges_count)\n logger.debug(\"Scanned edges count: {}; Start:{}, End:{}\".format(\n edges_count, start_edge, finish_edge))\n except ValueError:\n raise ValueError(\"Input data parsing error, \"\n \"the format should be like \\\"3 a b\\\"\")\n\n # scan edges\n edges = scan_edges(edges_count)\n logger.debug(\"Scanned edges: {}\".format(edges))\n\n optimize(edges, start_edge, finish_edge)\n\n print_output(edges)", "def iterate_outbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_outbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Outbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(vertex, v)\n print('Edge from ' + str(vertex) + ' to ' + str(v) + ' with cost ' + str(cost))", "def process_input(input_path):\n\n # Parse lines from input file into list\n with open(input_path, 'r') as input_file:\n lines = input_file.readlines()\n\n # Declare component lists and helper variables\n vertex_map = {} # Mapping of named vertices to indices, handles duplicate connections\n idx = 0\n edges = [] # List of (src, dst) tuples\n weights = [] # Weight of each edge\n\n for line in lines:\n # Parse each line of csv or text file\n if input_path.endswith('.csv'):\n parts = line.split(',')\n else:\n parts = line.split()\n\n # Add source vertex to list of vertices\n src = parts[0]\n if src not in vertex_map:\n vertex_map[src] = idx\n idx += 1\n\n # Add destination vertex to list of vertices\n dst = parts[1]\n if dst not in vertex_map:\n vertex_map[dst] = idx\n idx += 1\n\n # Add integer representation of edges to list of connections\n edges.append((vertex_map[src], vertex_map[dst]))\n weights.append(parts[2])\n\n # Get definite list of vertices\n vertices = vertex_map.keys()\n\n # Print graph information\n vprint(str(len(vertices)) + ' vertices')\n vprint(str(len(edges)) + ' edges')\n\n # Build IGraph representation of network\n graph = ig.Graph(edges, directed=False)\n graph.es['weight'] = [weights[e] for e in range(len(graph.es))]\n\n return graph, vertices", "def read_graph(args):\n dataset = pd.read_csv(args.features_path).values.tolist()\n edges = {}\n edges[\"positive_edges\"] = [edge[0:2] for edge in dataset if edge[2] == 1]\n edges[\"negative_edges\"] = 
[edge[0:2] for edge in dataset if edge[2] == -1]\n edges[\"ecount\"] = len(dataset)\n edges[\"ncount\"] = len(set([edge[0] for edge in dataset]+[edge[1] for edge in dataset]))\n return edges", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n BFS(adjList, s, n)", "def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)", "def pathFinder(graph, sourceVertex):\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)", "def read_edges(f=sys.stdin):\n edges = []\n k = ['first', 'last', 'capacity', 'flow', 'used']\n lines = f.readlines()\n for line in lines:\n v = [int(s) for s in line.split(\" \")] + [0, False]\n edges.append(dict(zip(k,v)))\n \n\n return edges", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def check_edges(self):\n start = int(input('Enter start vertex: '))\n end = int(input('Enter end vertex: '))\n if self._graph.is_edge_between(start, end):\n print('There is an edge from ' + str(start) + ' to ' + str(end))\n else:\n print('There is NO edge from ' + str(start) + ' to ' + str(end))", "def read_input():\n input()\n size = int(input().split()[-1])\n nb_edges = int(input().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = input()\n _, u, v, w = line.split()\n\n e = g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = input()\n while 'Terminals' not in line:\n line = input()\n if 'SECTION' in line:\n line = input()\n while 'Terminals' not in line:\n line = input()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = input()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)", "def getedge(self):\n cmd=\"getEdge(\"+self.board+\",\"+self.inpedge+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n #print 'edge= ',output\n self.edge=output[0]\n self.inputedge.setEntry(self.edge)\n if self.board != '0': \n self.delay=output[1] \n self.inputdelay.setEntry(self.delay)", "def test_consider_edges(self):\n path = os.path.join(get_file_dir(), 'data', 'GO_edges_consider.json')\n with open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0000211__GO:0005515__consider\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000211\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0005515\")", "def draw_edges(img, data_vertex, data_edges):\r\n i = 0\r\n for v1, v2, v3 in data_edges: # get the numbers of string\r\n # # v1, v2, v3 = v1 - 1, v2 - 1, v3 - 1 # change the numbering\r\n # print(v1,v2,v3)\r\n img = draw_line(img, data_vertex, v1, v2)\r\n img = draw_line(img, data_vertex, v1, v3)\r\n img = draw_line(img, data_vertex, v2, v3)\r\n i += 1\r\n # print(i)\r\n return img", "def scan(self, mask):", "def 
test_incoming_edge_traversals(self):\r\n e1 = TestEdge.create(self.v1, self.v2, numbers=12)\r\n e2 = TestEdge.create(self.v1, self.v3, numbers=13)\r\n e3 = OtherTestEdge.create(self.v2, self.v3, numbers=14)\r\n\r\n results = self.v2.inE()\r\n assert len(results) == 1\r\n assert e1 in results\r\n\r\n results = self.v2.inE(types=[OtherTestEdge])\r\n assert len(results) == 0", "def find_edges(text_box, edge_boxes):\n selected_edge_boxes = []\n for box in edge_boxes:\n x, y, w, h = box\n if text_box[0].start <= y and text_box[0].stop >= y + h and text_box[1].start <= x and text_box[\n 1].stop >= x + w:\n selected_edge_boxes.append(box)\n return selected_edge_boxes", "def check_input(nodes, num_edges):\n num_nodes = len(nodes)\n min_edges = num_nodes - 1\n if num_edges < min_edges:\n raise ValueError('num_edges less than minimum (%i)' % min_edges)\n max_edges = num_nodes * (num_nodes - 1)\n if num_edges > max_edges:\n raise ValueError('num_edges greater than maximum (%i)' % max_edges)", "def mark_visited(data):\n while True:\n try:\n mark_visited = int(input(\"Enter the number of a place to mark as visited \\n>>> \")) - 1\n break\n except ValueError:\n print(\"Invalid input, enter a valid number\")\n\n while mark_visited > len(data) - 1:\n print(\"Invalid place number\")\n mark_visited = int(input(\"Enter the number of a place to mark as visited \\n>>> \")) - 1\n\n if data[mark_visited][3] == \"v\":\n print(\"That place is already visited \\n\")\n\n else:\n data[mark_visited][3] = \"v\"\n data[mark_visited][4] = \"\"\n print(\"{0} in {1} visited! \\n\".format(data[mark_visited][0], data[mark_visited][1]))\n\n return data", "def find_matching_edges(self, outp, inp, label):\n return [a for a in self.edges\n if ((a['_label'] == label) and\n ((a['_out'] == outp and a['_in'] == inp) or \n (a['_in'] == outp and a['_out'] == inp)))]", "def _scanning_loop(self, indices_to_nodes, node_property,\n factor_aggregator, compute_statistics,\n total_factor_instances,\n all_edges, reverse_edges, task_queue,\n generated_edges, limit=None, verbose=False):\n first_scan = True\n\n while True:\n try:\n source_index = task_queue.get(timeout=0.1)\n if source_index == SENTINEL:\n break\n except queue.Empty:\n pass\n else:\n if first_scan:\n first_scan = False\n edge_list = self._scan_targets(\n indices_to_nodes, node_property, source_index,\n factor_aggregator, compute_statistics,\n total_factor_instances,\n all_edges, reverse_edges, limit=limit,\n verbose=verbose)\n generated_edges += edge_list", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... 
every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def test_all_known_edges(self):\n conn, cursor = get_db_cursor()\n build = \"toy_build\"\n database = \"scratch/toy.db\"\n talon.get_counters(database)\n edge_dict = init_refs.make_edge_dict(cursor)\n run_info = talon.init_run_info(database, build)\n conn.close()\n\n chrom = \"chr1\"\n vertex_IDs = [ 1, 2, 3, 4, 5, 6]\n strand = \"+\"\n edge_IDs, novelty = talon.match_all_transcript_edges(vertex_IDs, strand,\n edge_dict, run_info)\n\n assert edge_IDs == ( 1, 2, 3, 4, 5 ) \n assert novelty == ( 0, 0, 0, 0, 0 )", "def search_from_igraph():\n key = request.args.get('key')\n graph = FileStructureProcessor()\n return graph.search_from_igraph(key)", "def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')", "def main(args):\n\n # Take the adjacency matrix of the graph\n adjacency_mat_graph = parse_adjacency_matrix_csv(args.i)\n\n # Check if the number of row == number of columns of the matrix\n logging.info(\"Checking the matrix shape...\")\n number_row_edges = len(adjacency_mat_graph)\n number_col_edges = len(adjacency_mat_graph[0])\n if number_row_edges != number_col_edges:\n raise ValueError(f\"The number of nodes detected in row\"\n f\"{number_row_edges} not equal to the \"\n f\"number of nodes found in column \"\n f\"{number_col_edges}.\")\n\n # check and verify if graph has only one root\n logging.info(\"Checking if graph has only one root and verify...\")\n root, number_root = unique_root_node_checking(adjacency_mat_graph)\n\n if root == \"NA\" and number_root == 0:\n raise ValueError(f\"No root node found in graph\")\n elif root == \"NA\" and number_root > 0:\n raise ValueError(\"More than one root node found in graph\")\n\n # check if identify root is the same as the one provided\n if root != \"NA\" and root != args.r:\n raise ValueError(f\"Expected {args.r} as root node but {root}\"\n f\" found in graph.\")\n\n # Convert adjacency matrix to dict representation for further steps\n logging.info(\"Converting matrix to dictionary graph...\")\n dict_graph = adjacency_matrix_to_dict_graph(adjacency_mat_graph)\n\n # Check if provided node is in the graph\n logging.info(\"Checking if node of interest n in graph...\")\n if args.n not in dict_graph:\n raise ValueError(f\"Provided node of interest {args.n} \"\n f\"is not found in the provided graph.\")\n\n # Check if the provided graph is acyclic\n logging.info(\"Checking if graph is acyclic...\")\n is_cyclic = is_cyclic_graph(dict_graph)\n if is_cyclic:\n raise ValueError(\"The provided graph is not acyclic. \"\n \"At least one cycle was detected in graph.\")\n logging.info(\"THE PROVIDED GRAPH IS DAG !!!\")\n\n # Get depth\n logging.info(f\"Getting the depth of node {args.n}...\")\n path, depth = get_node_depth(dict_graph, args.r, args.n, [])\n assert len(path) -1 == depth, f\"Length path {path} != depth {depth}\"\n logging.info(f\"The depth of node {args.n} is equal to {depth}\")" ]
[ "0.62639165", "0.61379546", "0.59890026", "0.5863461", "0.5795918", "0.57196283", "0.5619727", "0.5607484", "0.55164105", "0.55157465", "0.54790145", "0.5441284", "0.5408243", "0.53987914", "0.5396176", "0.5338467", "0.5337781", "0.53025645", "0.53013176", "0.53010654", "0.5296391", "0.5269705", "0.52461654", "0.5235501", "0.52267", "0.5223313", "0.5179793", "0.51750606", "0.5172936", "0.51673484" ]
0.69968826
0
prints output in format of edge input
def print_output(edges):
    for edge in edges:
        print("{} {} {}".format(edge[0], edge[1], int(edge[2])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))", "def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))", "def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])", "def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()", "def writeEDGE(self):\n\t\tpass", "def main():\n e = Edge(12, 34, 5.67)\n print(e)", "def make_edge_text(self):\n fmtstr = ub.codeblock(\n '''\n connect from {oport_abs_name}\n to {iport_abs_name}\n ''')\n parts = []\n for iport in self.iports.values():\n for oport in iport.connections:\n if oport is not None:\n part = fmtstr.format(\n oport_abs_name=oport.absname(),\n iport_abs_name=iport.absname(),\n )\n parts.append(part)\n text = '\\n'.join(parts)\n return text", "def __repr__(self):\n s = [\"{} vertices, {} edges\\n\".format(self._V, self._E)]\n for v in range(self._V):\n s.append(\"%d : \" % (v))\n for w in self._adj[v]:\n s.append(\"%d \" % (w))\n s.append(\"\\n\")\n\n return \"\".join(s)", "def __str__(self):\n s = f\"GraphViaEdges '{self.name}',\\nedges :\\n\"\n for edge, edgetype in self.edges.items():\n s += f\" {edge[0]} {edgetype.value} {edge[1]}\\n\"\n\n return s", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def print_out_unexplained(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['unexplained_flow']\n print(\"({} {}) unexplained flow={}, edgeId={}\".format(s, t, w,\n arc))", "def __str__(self):\n output = \"\"\n for v, neighbors in self.neighbors.items():\n neighbors = {u: self.weights[(v, u)] for u in neighbors}\n output += str(v) + \" -> \" + str(neighbors) + \"\\n\"\n return output", "def print_graph() -> None:\n raise NotImplementedError", "def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"", "def print_model_graph(self, name=None, agent=([], [], [])):\n dot = pygraphviz.AGraph(directed=\"True\")\n for outp in list(self.outputs.keys()):\n dot.add_node(outp, pos=(outp[1:] + \",10\"), color=\"red\", label=outp + \", \" + str(self.outputs[outp].taking.size) + \"-\" + self.outputs[outp].taking.type)\n for inp in list(self.inputs.keys()):\n dot.add_node(inp, pos=(inp[1:] + \",0\"), color=\"blue\", label=inp + \", \" + 
str(self.inputs[inp].producing.size) + \"-\" + self.inputs[inp].producing.type)\n for comp in list(self.networks.keys()):\n dot.add_node(comp, label=comp + \"-\" + str(type(self.networks[comp].descriptor).__name__)[:-14] + \":\" + str(self.networks[comp].taking.size) + \"-\" + str(self.networks[comp].producing.size))\n\n for c in self.connections:\n con = self.connections[c]\n if self.conn_in_agent(con, agent[0]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"blue\")\n elif self.conn_in_agent(con, agent[1]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"red\")\n elif self.conn_in_agent(con, agent[2]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"green\")\n else:\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"black\")\n dot.layout('dot')\n if not name:\n name = str(hash(self))\n dot.draw(name + '.pdf')", "def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')", "def __str__(self):\n return \" {north} \\n{west} {east}\\n {south} \".format(**self.edges._asdict())", "def printPath(edgesTo,v):\r\n path = str()\r\n while v is not None:\r\n print(v) \r\n path += str(v) + ' -> ' \r\n v = edgesTo[v]\r\n print(path)", "def print_out():\n pass", "def __str__(self):\n weight = self.weight * self.connectivity\n strio = io.StringIO()\n for i in range(self.dim_in, self.dim_node):\n if i<self.dim_in+self.dim_hid and not self.hidden[i-self.dim_in]: # no such hidden node\n continue\n # strio.write('{:6.1f}'.format(weight[i][0]))\n strio.write('{:.6f}'.format(weight[i][0]))\n for j in range(1, self.dim_node-1):\n if self.dim_in<=j<self.dim_in+self.dim_hid and not self.hidden[j-self.dim_in]: # this node is not connected\n continue\n # strio.write(' {:6.1f}'.format(weight[i][j]))\n strio.write(' {:.6f}'.format(weight[i][j]))\n if i < self.dim_node - 1:\n strio.write('\\n')\n return strio.getvalue()", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Edge Weight: \" + str(self.weight) + \"\\n\" + \\\n \"Edge Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"First Incident Node: \\n\" + str(self.first_incident_node.get_name()) + \"\\n\" + \\\n \"Second Incident Node: \\n\" + str(self.second_incident_node.get_name()) + \"\\n\"", "def __repr__(self):\n return 'Edge(%s, %s)' % (repr(self[0]), repr(self[1]))", "def __repr__(self) -> str:\n if self._has_direction:\n return (\n f\"<Edge: from {self._start} to {self._end} \"\n f\"with label '{self._label}'>\"\n )\n return (\n f\"<Edge: between {self._start} and {self._end} \"\n f\"with label '{self._label}'>\"\n )", "def print_node_edge_sets(labels, aside, paths, mode, outf):\n\t#print_gams_set(\"hide(node)\", \"hidden nodes\", aside)\n\t#print \"\"\n\n\t# genes without labels\n\tnovel=set.union(labels[\"unknown\"], aside)\n\tprint_gams_set(\"novelGene(node)\", \"unlabeled or hidden genes\", novel, out=outf)\n\toutf.write(\"\\n\")\n\n\t# interface nodes and edges - assume we've taken care of hiding\n\t# them according to the mode by now\n\thits=set()\n\tintNodes=set()\n\tintEdges=set()\t\n\t\n\t# { pathfinder : { pid : { 
\"nodes\":[], \"edges\":[] } } }\n\tfor pf in paths:\n\t\tfor pid in paths[pf]:\n\t\t\thits.add(paths[pf][pid][\"nodes\"][0])\n\t\t\tintNodes.add(paths[pf][pid][\"nodes\"][-2])\n\t\t\tintEdges.add(paths[pf][pid][\"edges\"][-1])\n\n\tprint_gams_set(\"hit(node)\", \"hits\", hits, out=outf)\n\toutf.write(\"\\n\")\n\tprint_gams_set(\"intNode(node)\", \"interface nodes\", intNodes, out=outf)\n\toutf.write(\"\\n\")\n\tprint_gams_set(\"intEdge(edge)\", \"interface edges\", intEdges, out=outf)\n\toutf.write(\"\\n\")", "def printNetwork(self):\n\t\ti = 0\n\t\tfor branch in self.collectAllBranches():\n\t\t\t\n\t\t\tprint 'branch', i\n\t\t\tprint 'weight', branch.weight\n\n\t\t\tlUnit = branch.leftUnit\n\t\t\trUnit = branch.rightUnit\n\t\t\t\n\t\t\tprint 'left unit type/activation/output/delta', lUnit.unitType, lUnit.activation, lUnit.output, lUnit.delta\n\t\t\tprint 'right unit type/activation/output/delta', rUnit.unitType, rUnit.activation, rUnit.output, rUnit.delta\n\n\t\t\tprint '\\n'\n\t\t\t\n\t\t\ti+=1", "def pretty_edge(edge_record):\n # String of the form (from_node) ===label===> (to_node)\n from_node = edge_record.get(\"fromNode\", {})\n to_node = edge_record.get(\"toNode\", {})\n label = edge_record.get(\"label\", None)\n\n edge_str = \"(\" + Utils.pretty_node(from_node) + \")\"\n edge_str += \" ===\" + label + \"===> \"\n edge_str += \"(\" + Utils.pretty_node(to_node) + \")\"\n\n return edge_str", "def pretty_print_equation(self):\n\n for n in self.nodes:\n # Get a list of tuples, first is the v\n parents = self.adj_inv[n]\n if len(parents) == 0:\n if self.binary:\n right_side = '{0,1}'\n else:\n right_side = 'N(0, 1)'\n else:\n right_side = ' + '.join(['{:.3f}*x_{}'.format(self.weights[i, n], i)\n for i in parents])\n \n right_side.replace('+ -', '-')\n print('x_{} = {}'.format(n, right_side))", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)", "def __str__(self):\n out = [f'{v}: {self.adj_list[v]}' for v in self.adj_list]\n out = '\\n '.join(out)\n if len(out) < 70:\n out = out.replace('\\n ', ', ')\n return f'GRAPH: {{{out}}}'\n return f'GRAPH: {{\\n {out}}}'" ]
[ "0.755939", "0.7550991", "0.68970513", "0.68945307", "0.6707391", "0.6607396", "0.65338475", "0.650519", "0.6493433", "0.6471962", "0.64449626", "0.6379392", "0.63720196", "0.63169086", "0.6315759", "0.62729573", "0.6227196", "0.6221678", "0.61985976", "0.6175523", "0.61733556", "0.6166147", "0.610964", "0.60973275", "0.60820687", "0.606644", "0.60472757", "0.6010069", "0.59734464", "0.5964304" ]
0.8158672
0
A loading strategy for running large imports as multiple smaller ones.

The main functionality of this loader is to split two order iterators in smaller lists while keeping the ordering and their combined length around ``chunk_hint`` size.

>>> from importtools import chunked_loader
>>> loader = chunked_loader([10, 20, 30, 40], [11, 12, 50, 60], 5)
>>> source, destination = loader.next()
>>> sorted(source)
[10, 20, 30]
>>> sorted(destination)
[11, 12]
>>> source, destination = loader.next()
>>> sorted(source)
[40]
>>> sorted(destination)
[50, 60]
>>> source, destination = loader.next()
def chunked_loader(ordered_iter1, ordered_iter2, chunk_hint=16384):
    i1 = _iter_const(ordered_iter1, True)
    i2 = _iter_const(ordered_iter2, False)
    iterator = heapq.merge(i1, i2)
    while True:
        i1_elemens = list()
        i2_elemens = list()
        current_chunk = itertools.islice(iterator, chunk_hint)
        for element, from_iter1 in current_chunk:
            if from_iter1:
                i1_elemens.append(element)
            else:
                i2_elemens.append(element)
        for next_element, next_from_iter1 in iterator:
            if next_element == element:
                if next_from_iter1:
                    i1_elemens.append(next_element)
                else:
                    i2_elemens.append(next_element)
            else:
                e = (next_element, next_from_iter1)
                iterator = itertools.chain([e], iterator)
                break
        if not i1_elemens and not i2_elemens:
            break
        yield i1_elemens, i2_elemens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chunked_mem_sync(source_loader, destination_loader,\n DSFactory=RecordingDataSet, hint=16384):\n l = chunked_loader(source_loader, destination_loader, hint)\n for source, destination in l:\n dest_ds = DSFactory(destination)\n dest_ds.sync(source)\n yield dest_ds", "async def _load_next_chunk(self):\n raise NotImplementedError", "def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...", "def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]", "def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def chunked(size, source):\n for i in range(0, len(source), size):\n yield source[i : i + size]", "def iterload(filename, chunk=100, **kwargs):\n stride = kwargs.get('stride', 1)\n atom_indices = cast_indices(kwargs.get('atom_indices', None))\n if chunk % stride != 0 and filename.endswith('.dcd'):\n raise ValueError('Stride must be a divisor of chunk. stride=%d does not go '\n 'evenly into chunk=%d' % (stride, chunk))\n if chunk == 0:\n yield load(filename, **kwargs)\n # If chunk was 0 then we want to avoid filetype-specific code in case of undefined behavior in various file parsers.\n else:\n skip = kwargs.pop('skip', 0)\n if filename.endswith('.h5'):\n if 'top' in kwargs:\n warnings.warn('top= kwarg ignored since file contains topology information')\n\n with HDF5TrajectoryFile(filename) as f:\n if skip > 0:\n xyz, _, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n if atom_indices is None:\n topology = f.topology\n else:\n topology = f.topology.subset(atom_indices)\n\n while True:\n data = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if data == []:\n raise StopIteration()\n in_units_of(data.coordinates, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(data.cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)\n yield Trajectory(xyz=data.coordinates, topology=topology,\n time=data.time, unitcell_lengths=data.cell_lengths,\n unitcell_angles=data.cell_angles)\n\n if filename.endswith('.lh5'):\n if 'top' in kwargs:\n warnings.warn('top= kwarg ignored since file contains topology information')\n with LH5TrajectoryFile(filename) as f:\n if atom_indices is None:\n topology = f.topology\n else:\n topology = f.topology.subset(atom_indices)\n\n ptr = 0\n if skip > 0:\n xyz, _, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n xyz = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n time = np.arange(ptr, ptr+len(xyz)*stride, stride)\n ptr += len(xyz)*stride\n yield Trajectory(xyz=xyz, topology=topology, time=time)\n\n elif filename.endswith('.xtc'):\n topology = _parse_topology(kwargs.get('top', None))\n with XTCTrajectoryFile(filename) as f:\n if skip > 0:\n xyz, _, _, _ = f.read(skip)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n xyz, time, step, box = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, 
Trajectory._distance_unit, inplace=True)\n in_units_of(box, f.distance_unit, Trajectory._distance_unit, inplace=True)\n trajectory = Trajectory(xyz=xyz, topology=topology, time=time)\n trajectory.unitcell_vectors = box\n yield trajectory\n\n elif filename.endswith('.dcd'):\n topology = _parse_topology(kwargs.get('top', None))\n with DCDTrajectoryFile(filename) as f:\n ptr = 0\n if skip > 0:\n xyz, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n # for reasons that I have not investigated, dcdtrajectory file chunk and stride\n # together work like this method, but HDF5/XTC do not.\n xyz, box_length, box_angle = f.read(chunk, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(box_length, f.distance_unit, Trajectory._distance_unit, inplace=True)\n time = np.arange(ptr, ptr+len(xyz)*stride, stride)\n ptr += len(xyz)*stride\n yield Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=box_length,\n unitcell_angles=box_angle)\n\n else:\n log.critical(\"loading complete traj into mem! This might no be desired.\")\n t = load(filename, **kwargs)\n for i in range(skip, len(t), chunk):\n yield t[i:i+chunk]", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)", "def chunkify(iterable, chunk_size):\n _it = iter(iterable)\n while True:\n batch = islice(_it, chunk_size)\n yield chain([batch.__next__()], batch)", "def chunk(iter_list, size):\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())", "def test_chunk_size_has_priority_over_n_splits(self):\n chunks = list(chunk_tasks(range(4), chunk_size=4, n_splits=4))\n self.assertEqual(len(chunks), 1)\n self.assertEqual(len(chunks[0]), 4)\n self.assertEqual(list(range(4)), list(chain.from_iterable(chunks)))", "def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]", "def _next_batch(self, loader: CustomIterator) -> list:\n return self.mover.move(loader.__next__())", "def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]", "def _split_in_chunks(lst: Sequence[Any], chunksize: int) -> Iterator[Sequence[Any]]:\n for i in range(0, len(lst), chunksize):\n yield lst[i:i + chunksize]", "def chunker( it, size ):\n \n # Variables\n it = iter( it )\n \n # Selecting a bunch of jobs\n while True:\n p = tuple( itertools.islice( it, size ) )\n if not p:\n break\n yield p", "def chunks(iterable, size):\n it = iter(iterable)\n chunk = tuple(itertools.islice(it, size))\n while chunk:\n yield chunk\n chunk = tuple(itertools.islice(it, size))", "def iter_chunks(chunksize, *iterables):\n iterables = iter(zip(*iterables))\n\n while 1:\n chunk = tuple(islice(iterables, chunksize))\n\n if not chunk:\n return\n\n yield chunk", "def chunk(list, chunksize):\n for i in range(0, len(list), chunksize):\n yield list[i:i + chunksize]", "def 
chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunk(lst, chunk_len):\n\n for index in range(0, len(lst), chunk_len):\n yield lst[index:index + chunk_len]", "def chunkerator(obj: Iterable, stepsize: int = 10) -> Iterator:\n\n if obj:\n chunk, obj = obj[0:stepsize], obj[stepsize:]\n\n try:\n yield chunk\n yield from chunkerator(obj, stepsize=stepsize)\n except (RuntimeError, StopIteration, UnboundLocalError):\n pass", "def test_chunked():\n examples = list(range(10))\n assert list(chunked(iter(examples), 0)) == examples\n assert list(chunked(iter(examples), 1)) == [[i] for i in examples]\n assert list(chunked(iter(examples), 2)) == [[0,1], [2,3], [4,5], [6,7], [8,9]]\n assert list(chunked(iter(examples), 3)) == [[0,1,2], [3,4,5], [6,7,8], [9]]\n assert list(chunked(iter(examples), 4)) == [[0,1,2,3], [4,5,6,7], [8,9]]\n assert list(chunked(iter(examples), 5)) == [[0,1,2,3,4], [5,6,7,8,9]]\n assert list(chunked(iter(examples), 6)) == [[0,1,2,3,4,5], [6,7,8,9]]\n assert list(chunked(iter(examples), 7)) == [[0,1,2,3,4,5,6], [7,8,9]]\n assert list(chunked(iter(examples), 8)) == [[0,1,2,3,4,5,6,7], [8,9]]\n assert list(chunked(iter(examples), 9)) == [[0,1,2,3,4,5,6,7,8], [9]]\n assert list(chunked(iter(examples), 10)) == [examples]\n assert list(chunked(iter(examples), 11)) == [examples]", "def chunk(flat, sizes):\n iter_flat = iter(flat)\n yield from (list(islice(iter_flat, 0, size)) for size in sizes)", "def chunk(items, chunk_size):\n start_index = 0\n for start_index in xrange(0, len(items), chunk_size):\n end_index = min(start_index+chunk_size, len(items))\n yield items[start_index:end_index]", "def chunk_list(items, chunk_size=25):\n start = 0\n for end in range(chunk_size, len(items) + chunk_size, chunk_size):\n chunk = items[start:end]\n start = end\n yield chunk" ]
[ "0.6354266", "0.61556524", "0.60194796", "0.59803045", "0.59595215", "0.5909632", "0.5840156", "0.58236504", "0.57858485", "0.57338417", "0.5732517", "0.57212216", "0.5676315", "0.56753504", "0.5671433", "0.565042", "0.56440836", "0.56298584", "0.5615986", "0.5610143", "0.5595763", "0.55873", "0.55873", "0.55873", "0.5586953", "0.55825317", "0.5574189", "0.55622005", "0.5558321", "0.55497736" ]
0.7530391
0
Add takes 1 argument an 'int' or 'float', the number to add to 'total'
def add(self, num) -> [int, float]:
    if Calculater.check_int_or_float(num):
        self.total += num
    else:
        self.check_type(input_type=type(num))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, *args):\n sum = 0\n for arg in args:\n sum += float(arg)\n return sum", "def add(*args):\n body = ['<h1>Addition Calculator</h1>']\n _sum = sum(map(int, args))\n body.append(f'Total equals: {_sum}')\n return '\\n'.join(body)", "def add(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n summation = str(ft.reduce(oper.add,values))\n return summation", "def add(arg1, arg2):\r\n arg1 = float(arg1)\r\n arg2 = float(arg2)\r\n print(f\"Your answer is {arg1 + arg2}.\")", "def add(self, value):\n return self.number + value", "def add(self, x):\n self.sum += x\n self.n += 1", "def add(self, b):\n self.a += float(b)", "def add(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n sum = str(args[0] + args[1])\n return sum", "def add(self, value):", "def add(a: Decimal, b: Decimal) -> Decimal:\n return a + b", "def adding_total_calories(total_calories: int) -> int:\n for item in _calories:\n total_calories = total_calories + _calories[item]\n return total_calories", "def add(*args):\n\n result = int(args[0]) + int(args[1])\n\n return str(result)", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def add(self,*datas):\n\t\tresult = sum(datas)\n\t\treturn result", "def inc_total(self, dif):\n if not(is_number_correct(dif)):\n raise ValueError(\"Incorrect total value!\")\n self.total += int(dif)\n self.budget_holder[datetime.datetime.now()] = self.total", "def add(self, number):\n self.number += number\n return self.number", "def add(farg, *args): # *args can take 0 or more values\n print('Formal argument = ', farg)\n \n sum = 0 \n for i in args:\n sum+=i\n print('Sum of all numbers =', (farg+sum))", "def add(self, x):\n self.sum += (1 / self.counter) * (x - self.sum)\n self.counter += 1", "def add_to_average(total_count, total_value, new_value):\n return ((1.0 * total_count * total_value) + new_value) / (total_count + 1)", "def addition(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a + b", "def add(self, amount):\n self.amount += amount", "def add(num1, num2):\n sum = num1 + num2\n return sum", "def add(x, y):\n sum = 0\n sum = x + y\n return sum", "def kkAdd(*args):\n if (None in args):\n return None\n total = 0\n for arg in args:\n total += arg\n return total", "def add(self):\n return self._do_calc(self.adder)", "def add(self):\n return self._do_calc(self.adder)", "def add(self):\n return self._do_calc(self.adder)", "def add_numbers(a,b):\r\n return a+ b", "def total(num_list):\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n return num_sum", "def test_add_float(self):\n self.assertAlmostEqual(cr.add(2.21, 4.7), 2.21 + 4.7, places=2)" ]
[ "0.7935701", "0.74250954", "0.739781", "0.7102527", "0.7092447", "0.7013905", "0.69816494", "0.6873964", "0.6804099", "0.6773353", "0.67576015", "0.6706462", "0.66936225", "0.6674202", "0.66623414", "0.6653001", "0.6609995", "0.6609812", "0.6603702", "0.6588289", "0.6569988", "0.6550683", "0.65456325", "0.65440977", "0.6543292", "0.6543292", "0.6543292", "0.6541271", "0.6535438", "0.6534371" ]
0.78199905
1
get disparity that is calculated by (cur_price / est_price) * 100
def get_disparity(code, k, w=1):
    est_price, shares, value, net_worth, roe, excess_earning = estimate_price(code, k, w)
    cur_price = reader.get_current_price(code)
    try:
        disparity = (cur_price / est_price) * 100
    except:
        disparity = None
    return disparity, cur_price, est_price, shares, value, net_worth, roe, excess_earning
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def price_diff_rel_d(self): \n try:\n return(self.price_diff_d / self.price_open)\n except:\n return", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def getFactor(currency):", "def price_diff_rel(self): \n try:\n return(self.price_diff / self.price_open)\n except:\n return", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )", "def div(self):\n a = self.nums()\n x = LibraryFunctions.per(a, 0.9) - LibraryFunctions.per(a, 0.1)\n return x / 2.58", "def price_diff_d(self):\n try:\n return(self.direction*(self.price_close - self.price_open))\n except:\n return", "def net_position(self):\n average_price = 0\n sum = 0\n for transaction in self.transactions:\n average_price += abs(transaction[0]/transaction[1])\n sum += transaction[1]\n\n average_price /= len(self.transactions) \n average_price *= sum\n \n return average_price", "def ppcm_denominateurs(self):\n\t\tl = []\n\t\tn = 1\n\t\tif self.__valide:\n\t\t\tfor m in self.liste_decroissante():\n\t\t\t\t\"\"\" les denominateurs sont positifs \"\"\"\n\t\t\t\te = m.get_coefficient().get_denom().valeur()\n\t\t\t\tif not (e in l):\n\t\t\t\t\tl.append(e)\n\t\t\t\tn *= e\n\t\treturn n / pgcd_liste(l)", "def price_diff(self):\n try:\n return(self.price_close - self.price_open)\n except:\n return", "def loss_prime(actual: float, expect: float) -> float:\n return -expect / actual + (1 - expect) / (1 - actual)", "def get_price():\n return uniform(1.0, 350.0)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def get_virtual_price() -> uint256:\n D: uint256 = self._get_D(self._xp(), self._A())\n # D is in the units similar to DAI (e.g. 
converted to precision 1e18)\n # When balanced, D = n * x_u - total virtual value of the portfolio\n token_supply: uint256 = ERC20(self.lp_token).totalSupply()\n return D * PRECISION / token_supply", "def implied_discount_factor(p1: Instrument, c1: Instrument, p2: Instrument, c2: Instrument) -> float:\n return (c1.price - p1.price - c2.price + p2.price)/ (c2.strike - c1.strike)", "def diffuse_ratio(DIFF_data,ghi_data): \n K = DIFF_data/ghi_data\n \n return K", "def expected_result(self, other):\r\n return float(1) / (1 + math.pow(10, float(other.elo - self.elo) / DIVIDER))", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def get_fee_pct(self, contract_type: str) -> Tuple[float, float]:\n if contract_type == 'forex':\n return (0.00002, 0.00002)\n elif contract_type == 'crypto':\n if self.CRYPTO_EXCHANGE == 'binance':\n if self.trade_volume < 50_000:\n return (.001, .001)\n elif self.trade_volume < 100_000:\n return (.0009, .0009)\n elif self.trade_volume < 5000_000:\n return (.0009, .0008)\n elif self.trade_volume < 1_000_000:\n return (.0008, .0007)\n elif self.trade_volume < 5_000_000:\n return (.0007, .0005)\n elif self.trade_volume < 10_000_000:\n return (.0006, .0004)\n elif self.trade_volume < 25_000_000:\n return (.0006, 0)\n elif self.trade_volume < 100_000_000:\n return (.0005, 0)\n elif self.trade_volume < 250_000_000:\n return (.0004, 0)\n elif self.trade_volume < 500_000_000:\n return (.0003, 0)\n else: return (.0002, 0)\n elif self.CRYPTO_EXCHANGE == 'kraken':\n if self.trade_volume < 50_000:\n return (.0026, .0016)\n elif self.trade_volume < 100_000:\n return (.0024, .0014)\n elif self.trade_volume < 250_000:\n return (.0022, .0012)\n elif self.trade_volume < 500_000:\n return (.002, .001)\n elif self.trade_volume < 1_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 2_500_000:\n return (.0016, .0006)\n elif self.trade_volume < 5_000_000:\n return (.0014, .0004)\n elif self.trade_volume < 10_000_000:\n return (.0012, .0002)\n else: return (.001, 0)\n elif self.CRYPTO_EXCHANGE == 'coinbase':\n if self.trade_volume < 10_000:\n return (.005, .005)\n elif self.trade_volume < 50_000:\n return (.0035, .0035)\n elif self.trade_volume < 100_000:\n return (.0025, .0015)\n elif self.trade_volume < 1_000_000:\n return (.002, .001)\n elif self.trade_volume < 10_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 50_000_000:\n return (.0015, .0005)\n elif self.trade_volume < 300_000_000:\n return (.0007, 0)\n elif self.trade_volume < 500_000_000:\n return (.0005, 0)\n else: return (.0004, 0)\n elif self.CRYPTO_EXCHANGE == 'robinhood':\n return (0.0001, 0.0001)\n return (0, 0)", "def get_discount(self, price):\r\n pass", "def detectar_constantes_btc():\n\n ultimos_precios = persistence.traer_ultimos_precios_btc()\n prev = int(ultimos_precios[0])\n porcentaje = 0\n counter = 0\n for i in range(1,60):\n if prev < int(ultimos_precios[i]):\n counter = counter + 1\n elif prev > int(ultimos_precios[i]):\n counter = counter - 1\n prev = int(ultimos_precios[i])\n porcentaje = calcular_porcentaje(int(ultimos_precios[0]), int(ultimos_precios[i]))\n porcentaje = round(porcentaje, 2)\n if counter > 10 and porcentaje > 1:\n return porcentaje\n elif counter < -10 and porcentaje < -1:\n return porcentaje\n else:\n return 0", "def stone_parity(self, 
board):\n computer_score = sum(sum(board == self.computer_num))\n opponent_score = sum(sum(board == self.opponent_num))\n return 100 * (computer_score - opponent_score) / (computer_score + opponent_score)", "def detectar_constantes_doge():\n\n ultimos_precios = persistence.traer_ultimos_precios_doge()\n prev = float(ultimos_precios[0])\n porcentaje = 0\n counter = 0\n for i in range(1,60):\n if prev < float(ultimos_precios[i]):\n counter = counter + 1\n elif prev > float(ultimos_precios[i]):\n counter = counter - 1\n prev = float(ultimos_precios[i])\n\n porcentaje = calcular_porcentaje(float(ultimos_precios[0]), float(ultimos_precios[i]))\n porcentaje = round(porcentaje, 2)\n if counter > 10 and porcentaje > 1:\n return porcentaje\n elif counter < -10 and porcentaje < -1:\n return porcentaje\n else:\n return 0", "def resultado(self):\n return self.__numerador/self.__denominador", "def _get_lip_best(self) -> float:\n pass", "def cv(self):\n return self.close.std() / self.close.mean()", "def _compute_binary_demographic_parity(\n tp: torch.Tensor, fp: torch.Tensor, tn: torch.Tensor, fn: torch.Tensor\n) -> Dict[str, torch.Tensor]:\n pos_rates = _safe_divide(tp + fp, tp + fp + tn + fn)\n min_pos_rate_id = torch.argmin(pos_rates)\n max_pos_rate_id = torch.argmax(pos_rates)\n\n return {\n f\"DP_{min_pos_rate_id}_{max_pos_rate_id}\": _safe_divide(pos_rates[min_pos_rate_id], pos_rates[max_pos_rate_id])\n }", "def cash_ratio(self):\n return self.cash / self.current_liabilities" ]
[ "0.6276128", "0.6193277", "0.61364526", "0.6128976", "0.61272293", "0.60530365", "0.59040016", "0.58949846", "0.5863152", "0.58590317", "0.58321106", "0.5761174", "0.5748162", "0.56991106", "0.5658361", "0.5656721", "0.5639175", "0.5624067", "0.5613983", "0.55807894", "0.55622333", "0.553355", "0.5517839", "0.55134153", "0.5497263", "0.5486761", "0.5471004", "0.5432417", "0.5431752", "0.5429433" ]
0.7322828
0
Spawns the given number of workers, by default daemon, and returns a list of them. 'interval' determines the time delay between each launching
def spawnWorkers(num, target, name=None, args=(), kwargs={}, daemon=1, interval=0):
    from threading import Thread
    threads = []
    for i in range(num):
        t = Thread(target=target, name=name, args=args, kwargs=kwargs)
        t.setDaemon(daemon)
        t.start()
        threads.append(t)
        time.sleep(interval)
    return threads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __launch_worker__start_vms(cls, worker_obj, num_vms_to_start=0):\n inst_to_deploy = []\n if num_vms_to_start > 0:\n # Start a new instances\n print \"Starting {0} new workers\".format(num_vms_to_start)\n inst_to_deploy = worker_obj.start_instance(num=num_vms_to_start)\n if not isinstance(inst_to_deploy, list):\n inst_to_deploy = [inst_to_deploy]\n return inst_to_deploy", "def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def init_workers(self):\n worker_list = []\n for number in range(0, self.staff):\n worker = self.get_worker()\n worker.job_id = self.job_id\n worker.number = number + 1\n worker_list.append(worker)\n self.worker_list = worker_list\n return worker_list", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def start_daemon(interval=600):\n thread = threading.Thread(target=daemon_job, args=(interval, ))\n thread.daemon = True\n thread.start()", "def workers(self):\n return self.worker_list", "def create_jobs(num_jobs=1, lr=0.01):\n return [\n JobOpts(job_id=1, lr=lr)\n for j in range(1, num_jobs+1)\n ]", "def get_workers(limit=20):\n connection = MySQLdb.connect(host = server_info[\"db_host\"],\n user = server_info[\"db_username\"],\n passwd = server_info[\"db_password\"],\n db = server_info[\"db_name\"])\n cursor = connection.cursor()\n cursor.execute(sql[\"select_workers\"], (limit,))\n return cursor.fetchall()", "def genJobList():\n nit=10\n reply=[]\n while len(reply)<10: #assume qstat fails if less that 10 jobs on cluster\n reply=chomp(os.popen('qstat|expand|tr -s \\' \\'|cut -d\\' \\' -f 1,2,5').readlines())\n nit+=1\n if nit>10: break\n return reply", "def _launchWorkers(self, cmdLine, numWorkers):\n\n self._workers = []\n for i in range(numWorkers):\n stdout = tempfile.TemporaryFile()\n stderr = tempfile.TemporaryFile()\n p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True,\n stdin=None, stdout=stdout, stderr=stderr)\n self._workers.append(p)", "def give_workers_list(self):\n return self._workers", "def background_worker_pool(self):\r\n return self.run_tracker.background_worker_pool()", "def create_workers(self, threads_count):\n\n for _ in xrange(threads_count):\n new_thread = Thread(target=self.execute)\n self.threads.append(new_thread)", "def get_worker_nodes(self):\n worker_nodes_count = input('enter number of worker nodes\\n'\n 'default [2]: ')\n default = 2\n worker_nodes_count = set_values(worker_nodes_count, default, check='integer')\n worker_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['worker_nodes'] = []\n for num in range(worker_nodes_count):\n worker_values = []\n default = 'worker-{}'.format(num)\n worker_name = input('enter the worker {} node name\\n'\n 'default [{}]: '.format(num, default))\n worker_name = set_values(worker_name, default)\n worker_ip = get_ip(node_name=worker_name, ip_type='os')\n worker_mac = get_network_device_mac(node_name=worker_name, ip_type='idrac')\n worker_values.append(worker_name)\n worker_values.append(worker_ip)\n worker_values.append(worker_mac)\n worker_node_dict_pairs = 
dict(zip(worker_keys, worker_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(worker_name, worker_name,\n worker_ip, worker_mac)) \n self.inventory_dict['csah']['vars']['worker_nodes'].append(worker_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_workers'] = worker_nodes_count", "def parallel_provision_server(self, bodies, **kwargs):\n thrd_no = kwargs.get('count', 9)\n thrd_out = kwargs.get('timeout', 60)\n thrd_poll = kwargs.get('interval', 60)\n threads = list()\n\n # Identify time out for the keyword\n _t = 1 if len(bodies) < thrd_no else (len(bodies) / thrd_no) + 1\n time_out = datetime.now() + timedelta(minutes=thrd_out * _t)\n\n while time_out > datetime.now():\n LOG.publish_message(timeout=2)\n\n for index, item in enumerate(threads):\n if not item.is_alive():\n threads.pop(index)\n\n if len(threads) < thrd_no and len(bodies):\n t = Thread(target=self._parallel_deploy,\n args=(bodies.pop(), thrd_out, thrd_poll,))\n t.daemon = True\n t.start()\n threads.append(t)\n elif not len(bodies) and not len(threads):\n # No pending tasks or jobs\n break\n else:\n # There are some pending tasks or jobs\n sleep(5)\n\n LOG.publish_message(timeout=60)\n return self.results", "def daemon_job(interval):\n time.sleep(3) # Wait for api server to start first\n while True:\n try:\n crawl()\n process_notification()\n except Exception:\n traceback.print_exc()\n time.sleep(interval)", "def workers(self):\n return list(self._workers.keys())", "def _configure_remote_workers(default_num_clients, remote_executors):\n loop, must_close_loop = _get_event_loop()\n available_executors = [ex for ex in remote_executors if ex.is_ready]\n logging.info('%s TFF workers available out of a total of %s.',\n len(available_executors), len(remote_executors))\n if not available_executors:\n raise execution_context.RetryableError(\n 'No workers are ready; try again to reconnect.')\n try:\n remaining_clients = default_num_clients\n live_workers = []\n for ex_idx, ex in enumerate(available_executors):\n remaining_executors = len(available_executors) - ex_idx\n default_num_clients_to_host = remaining_clients // remaining_executors\n remaining_clients -= default_num_clients_to_host\n if default_num_clients_to_host > 0:\n _configure_remote_executor(\n ex, {placements.CLIENTS: default_num_clients_to_host}, loop)\n live_workers.append(ex)\n finally:\n if must_close_loop:\n loop.stop()\n loop.close()\n return [\n _wrap_executor_in_threading_stack(e, can_resolve_references=False)\n for e in live_workers\n ]", "def _create_workers(self, start=True):\n\n bearer = api_client.get_bearer_token()\n account = api_client.account_id_from_jwt(bearer.value)\n LOGGER.info(\"account: %s\", account)\n\n project = self.args.get(\"project\") or None\n LOGGER.info(\"project: %s\", project)\n\n location = self.args.get(\"location\") or None\n LOGGER.info(\"location: %s\", location)\n\n thread_count = self.args.get(\"thread_count\") or 1\n LOGGER.info(\"thread_count: %s\", thread_count)\n\n # CREATE WORKER PROCESSES\n workers = {}\n\n # Create DownloadWorker processes\n for _ in range(thread_count):\n\n # Create a process-safe run_state object for controlling process\n # run_state = multiprocessing.Array('c', \"stoppingorstuff\")\n global RUN_STATE\n wrk = UploaderWorker(\n RUN_STATE,\n self._results_queue,\n account=account,\n project=project,\n location=location)\n workers[wrk] = RUN_STATE\n\n log_history_wrk = self.create_log_history()\n\n workers[log_history_wrk] = RUN_STATE\n\n if 
start:\n for wrkr in workers:\n wrkr.start()\n time.sleep(.5)\n\n return workers", "def _setup_workers(self, num_workers):\n self.pool = []\n\n for _ in range(num_workers):\n self.pool.append(Thread(target=self.threadloop))\n\n for a_thread in self.pool:\n a_thread.setDaemon(True)\n a_thread.start()", "def __init__(self, maxsize=0, workers=10):\n self.queue = JoinableQueue(maxsize=maxsize)\n [spawn(self.worker) for x in range(workers)]", "def start_workers(self, window_size):\n input_q = mp.Queue(maxsize=self.processes)\n output_q = mp.Queue()\n workers = []\n for _ in range(self.processes):\n accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)\n worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)\n worker.start()\n workers.append(worker)\n\n return workers, input_q, output_q", "def setup_workers(num_cpus, outdir, server_socket, verbose=True,\r\n error_profile=None):\r\n DENOISE_WORKER = \"denoiser_worker.py\"\r\n workers = []\r\n client_sockets = []\r\n # somewhat unique id for cluster job\r\n tmpname = \"\".join(sample(list(lowercase), 8))\r\n\r\n host, port = server_socket.getsockname()\r\n\r\n # TODO: this should be set to a defined wait time using alarm()\r\n for i in range(num_cpus):\r\n name = outdir + (\"/%sworker%d\" % (tmpname, i))\r\n workers.append(name)\r\n cmd = \"%s -f %s -s %s -p %s\" % (DENOISE_WORKER, name, host, port)\r\n\r\n if verbose:\r\n cmd += \" -v\"\r\n if error_profile:\r\n cmd += \" -e %s\" % error_profile\r\n\r\n submit_jobs([cmd], tmpname)\r\n # wait until the client connects\r\n # This might be a race condition -> make the client robust\r\n client_socket, client_address = server_socket.accept()\r\n client_sockets.append((client_socket, client_address))\r\n\r\n return workers, client_sockets", "def run(self):\n for worker in self.simulation_workers:\n worker.start()", "def start_threads(count):\n for i in range(count):\n threading.Thread(target=send_pulses, args=(i,)).start()", "def startWorkers(self):\n for i in range(self.aOT):\n t = thr.Thread(target=self.threadWorker)\n t.start()\n self.threads.append(t)", "def getWorkers(self):\n return self.workers", "def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers", "def execute_work_items(timeout,\n work_items,\n config):\n print(\"execute_work_items\")\n return celery.group(\n worker_task.s(work_item,\n timeout,\n config)\n for work_item in work_items)" ]
[ "0.5961028", "0.59262305", "0.589819", "0.58462024", "0.5842554", "0.5837701", "0.5811775", "0.5801807", "0.574919", "0.56564355", "0.56557107", "0.5641795", "0.5583108", "0.5534931", "0.54899144", "0.54853344", "0.5479785", "0.5467822", "0.5460599", "0.54557663", "0.54528475", "0.5380779", "0.53774464", "0.5369221", "0.53516656", "0.5330301", "0.53242624", "0.5317642", "0.52715194", "0.52697563" ]
0.7500879
0
Converts a sequence of elements to a string in a standard, filesystemsafe way
def seq2str(seq):
    return ','.join(map(cleanstr, seq))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stringer(list):\n\tstring = \"\"\n\tfor x in list:\n\t\tstring = string + str(x)\n\treturn string", "def get_itr_str(iterable) -> str:\n\treturn str([str(i) for i in iterable])", "def list2string(a_list):\n\n the_string = ''\n for elem in a_list:\n the_string += str(elem)\n return the_string", "def listToString(L):\r\n S = ''\r\n for x in L:\r\n S += str(x)\r\n return S", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def listToString(s):\n # initialize an empty string\n str1 = \"\"\n\n # traverse in the string\n for ele in s:\n str1 += ele\n\n # return string\n return str1", "def listToString(s):\n # initialize an empty string\n str1 = \"\"\n\n # traverse in the string\n for ele in s:\n try:\n str1 = str1 + \" \" + ele\n except:\n pass\n\n # return string\n return str1", "def list_to_str(input_str):\r\n\r\n return \" \".join([str(val) for val in input_str])", "def list_to_str(list_to_convert):\n return ' '.join(to_str(item) for item in list_to_convert)", "def list_2_string(l, name='List'):\n buff = io.StringIO()\n print_list(l, name=name, output=buff)\n return buff.getvalue()", "def unicode_list_to_str(u_code_list): #This is just a function for me. Has nothing to do with flask or anything, okay?\n out_list = \"\"\n for item in u_code_list:\n out_list = out_list + str(item) + \"-\"\n return out_list.rstrip(\"-\") #removes the extra '-' (i.e 2-3-4-1-)", "def _convertListToString(self, list_of_objects):\n return (';').join(list_of_objects)", "def join(self, iterable) -> String:\n pass", "def list_to_string(in_list):\n if not in_list:\n return \"[]\"\n else:\n return \"\\n- \" + \"\\n- \".join(in_list)", "def as_str(the_val):\n if hasattr(the_val, \"__iter__\"):\n return \"[{}]\".format(\", \".join([str(v) for v in the_val]))\n return str(the_val)", "def batches2string(batches):\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s", "def batches2string(batches):\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s", "def batches2string(batches):\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s", "def seqs_to_strs(self,id_array,remove_pads=True):\n l = self.seqs_to_toks(id_array,remove_pads)\n return [' '.join(i) for i in l]", "def _com_to_string(com):\n return sep.join([str(x) for x in com])", "def get_list_as_str(list_to_convert):\n return \", \".join([\"'{}'\".format(list_item) for list_item in list_to_convert])", "def listtostring(self, charlist):\n s = \"\"\n for char in charlist:\n s += char\n return s", "def list_str(lis):\r\n as_str = \"\"\r\n for item in lis:\r\n as_str += \" \" + str(item) + \",\"\r\n return as_str[:-1]", "def list_to_string(list):\n if len(list) == 1:\n string = '{}x1'.format(list[0])\n elif list[1:] == list[:-1]:\n string = '{}x{}'.format(list[1], len(list))\n else:\n string = ''\n for i in range(len(list) - 1):\n string += str(list[i]) + ','\n string += str(list[-1])\n return string", "def l2s(l):\n return ''.join(l)", "def _list2str(self, data, delimiter=\",\", classify=lambda x: x):\n res = \"\"\n for i in range(len(data)):\n res += classify(data[i])\n if i != len(data) - 1:\n res += delimiter + \" \"\n return res", "def _to_string(self, lst, indent=''):\n result = []\n for elem in lst:\n if isinstance(elem, list):\n if len(elem) > 0:\n 
result.append('\\n')\n result.append(self._to_string(elem, indent + ' '))\n elif isinstance(elem, float):\n result.append('%.6f' % elem)\n elif isinstance(elem, basestring):\n for char in ('(', ')', ' '):\n if char in elem:\n result.append('\"%s\"' % elem)\n break\n else:\n result.append(str(elem))\n elif elem is not None:\n result.append(str(elem))\n return indent + '(' + ' '.join(result) + ')\\n' + indent", "def array_to_concatenated_string(array):\r\n return \",\".join(str(x) for x in array)", "def str_transform_list(L):\n return [str(x) for x in L]" ]
[ "0.7136884", "0.6967668", "0.68453056", "0.684023", "0.68400466", "0.68400466", "0.6574962", "0.64408785", "0.64219457", "0.6413968", "0.63567305", "0.63358706", "0.6314232", "0.63002855", "0.62802845", "0.6277351", "0.6253571", "0.6253571", "0.6253571", "0.62460905", "0.62440765", "0.6243292", "0.6230842", "0.622616", "0.62219316", "0.61920613", "0.6163278", "0.6146982", "0.6140668", "0.6123702" ]
0.7047315
1