query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (sequencelengths 30) | negative_scores (sequencelengths 30) | document_score (stringlengths 4–10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Read image using its path. Default value is grayscale, and image is read by YCbCr format as the paper said. | def imread(path, is_grayscale=True):
if is_grayscale:
return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)
else:
return scipy.misc.imread(path, mode='YCbCr').astype(np.float) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def imread(path, is_grayscale=True):\n if is_grayscale:\n #flatten=True 以灰度图的形式读取 \n return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return scipy.misc.imread(path, mode='YCbCr').astype(np.float)",
"def imread(path, is_grayscale=True):\n if is_grayscale:\n # flatten=True: 形成單層的灰階通道\n return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return scipy.misc.imread(path, mode='YCbCr').astype(np.float)",
"def imread(path, is_grayscale=True):\n if is_grayscale:\n return imageio.imread(path, as_gray=True, pilmode='YCbCr').astype(np.float32)\n else:\n return imageio.imread(path, pilmode='YCbCr').astype(np.float32)",
"def readImage(self, path, tt=1):\n return cv2.imread( path, tt)",
"def _read_input_file(self, path: Path):\n img = cv2.imread(str(path))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # shape: (H, W, C)\n return img",
"def __read_image(self, path):\n path = 'data/' + path\n image = cv2.imread(path)\n\n # Convert greyscale image to BGR\n if image.shape[-1] == 1:\n image = np.dstack([image, image, image])\n\n # Convert BGR image to RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n return image",
"def read_image(path):\n img = misc.imread(path)\n return img",
"def read_img(path):\r\n if os.path.isfile(path):\r\n return cv2.imread(path)\r\n else:\r\n raise ValueError('hiiiiiiiiii')",
"def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')",
"def imread(path, is_grayscale=False):\r\n\r\n if is_grayscale:\r\n return scipy.misc.imread(path, flatten=True).astype(np.float32)\r\n # img1=cv.imread(path).astype(np.float32)\r\n # return cv.cvtColor(img1,cv.COLOR_BGR2YCrCb)\r\n else:\r\n # img1=cv.imread(path).astype(np.float32)\r\n # return cv.cvtColor(img1,cv.COLOR_BGR2YCrCb)\r\n\r\n return scipy.misc.imread(path).astype(np.float32)",
"def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img",
"def read_image(path):\n\n image = cv2.imread(path)\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)",
"def read_image(path: str):\n return Image.open(path, mode=\"r\")",
"def read_image(image_path: str):\n\treturn cv.imread(image_path, cv.IMREAD_UNCHANGED)",
"def read_image(image_path, gray=False):\n if gray:\n return cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n \n image = cv2.imread(image_path) \n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)",
"def read_image(image_path, *args, **kwargs):\n # TODO: Implement the method\n image2 = Image.open(image_path)\n image = num.asarray(image2)\n\n return image",
"def read_rgb_image(img_path, format='ndarray'):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n if format == 'PIL':\n img = Image.open(img_path).convert(\"RGB\")\n elif format == 'ndarray':\n img = cv2.imread(img_path)\n if len(img.shape) == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n return img",
"def imread(path, as_gray=False, **kwargs):\n image = iio.imread(path, as_gray, **kwargs)\n if image.dtype == np.uint8:\n image = image / 127.5 - 1\n elif image.dtype == np.uint16:\n image = image / 32767.5 - 1\n elif image.dtype in [np.float32, np.float64]:\n image = image * 2 - 1.0\n else:\n raise Exception(\"Inavailable image dtype: %s!\" % image.dtype)\n return image",
"def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)",
"def imread(path):\n img = cv2.imread(path)\n return img",
"def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255",
"def Read(image_path):\n # use cv2.imread() to read an images.\n # syntax : cv2.imread(filename, flag=None)\n return cv2.imread(image_path, 0)",
"def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img",
"def read(\n img_path: Union[str, pathlib.Path], mode: int = cv2.IMREAD_UNCHANGED\n) -> np.ndarray:\n return cv2.imread(str(img_path), mode)",
"def read(path: Union[Path, str]) -> np.ndarray:\n return _reader.imread(str(path))",
"def read_image(image_path: str, gray: bool=False) -> np.ndarray:\n if gray:\n return cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n\n image = cv2.imread(image_path)\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)",
"def read_image(fname):\n\timg = cv2.imread(fname,cv2.IMREAD_GRAYSCALE)\n\treturn img",
"def load_image(self, image_path):\n # Load image\n image = cv2.imread(image_path)\n #TODO 如果是灰度图先转为RGB的\n # If grayscale. Convert to RGB for consistency.\n # if image.ndim != 3:\n # image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n pass",
"def get_input(path):\n img = imread(path)\n return img",
"def read_image_greyscale(path: str) -> np.ndarray:\n img = imread(path)\n if len(img.shape) > 2:\n img = np.dot(img[..., :3], [0.299, 0.587, 0.114])\n return img"
] | [
"0.778447",
"0.77261835",
"0.7638995",
"0.75208247",
"0.74676746",
"0.74573797",
"0.7367039",
"0.722754",
"0.721408",
"0.72122663",
"0.71996546",
"0.7183188",
"0.70761466",
"0.7057287",
"0.7026387",
"0.70155853",
"0.6943071",
"0.6942862",
"0.69331473",
"0.689974",
"0.6891293",
"0.6876135",
"0.6834026",
"0.6803568",
"0.676568",
"0.6737664",
"0.67365336",
"0.6699616",
"0.66899395",
"0.6669361"
] | 0.7768587 | 1 |
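For context, `scipy.misc.imread` (used throughout this row's document and negatives) has been removed from recent SciPy releases. A minimal sketch of the same read path using `imageio`'s Pillow backend follows; the function name `imread_ycbcr` and the assumption that `as_gray`/`pilmode` reproduce the old `flatten`/`mode` behaviour are illustrative, not part of the dataset.

```python
import numpy as np
import imageio.v2 as imageio


def imread_ycbcr(path, is_grayscale=True):
    """Read an image as float64, mirroring the grayscale/YCbCr options above."""
    if is_grayscale:
        # as_gray=True collapses the image to a single luminance channel,
        # analogous to flatten=True in the old scipy.misc.imread call.
        return imageio.imread(path, as_gray=True).astype(np.float64)
    # pilmode='YCbCr' asks the Pillow plugin for a YCbCr-encoded array.
    return imageio.imread(path, pilmode='YCbCr').astype(np.float64)
```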
Dynamically generate options for resource group form field based on the user's selection for Environment. This method requires the user to set the resource_group parameter as dependent on environment. | def generate_options_for_resource_group(control_value=None, **kwargs):
if control_value is None:
return []
env = Environment.objects.get(id=control_value)
if CB_VERSION_93_PLUS:
# Get the Resource Groups as defined on the Environment. The Resource Group is a
# CustomField that is only updated on the Env when the user syncs this field on the
# Environment specific parameters.
resource_groups = env.custom_field_options.filter(
field__name="resource_group_arm"
)
return [rg.str_value for rg in resource_groups]
else:
rh = env.resource_handler.cast()
groups = rh.armresourcegroup_set.all()
return [g.name for g in groups] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_options_for_cloudbolt_environment(group=None, **kwargs):\n envs = Environment.objects.filter(\n resource_handler__resource_technology__name='Google Cloud Platform') \\\n .select_related('resource_handler')\n if group:\n group_env_ids = [env.id for env in group.get_available_environments()]\n envs = envs.filter(id__in=group_env_ids)\n return [\n (env.id, u'{env} ({project})'.format(\n env=env, project=env.gcp_project))\n for env in envs\n ]",
"def resource_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_group\")",
"def resource_group(self) -> str:\n return pulumi.get(self, \"resource_group\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")",
"def resource_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_group_name\")"
] | [
"0.6358487",
"0.5937777",
"0.55043066",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523",
"0.54691523"
] | 0.6859024 | 0 |
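The generator in this row relies on CloudBolt's ORM (`Environment`, `custom_field_options`, `armresourcegroup_set`), so it is not runnable on its own. As a library-free illustration of the same dependent-dropdown pattern, the control flow reduces to the sketch below; `ENV_RESOURCE_GROUPS` is a hypothetical stand-in for the environment-to-resource-group lookup, not CloudBolt API.

```python
# Hypothetical stand-in for the lookup the CloudBolt ORM performs:
# keys are environment ids, values are resource group names.
ENV_RESOURCE_GROUPS = {
    1: ["rg-dev-eastus", "rg-dev-westus"],
    2: ["rg-prod-eastus"],
}


def generate_options_for_resource_group(control_value=None, **kwargs):
    # No environment selected yet: the dependent field shows no options.
    if control_value is None:
        return []
    # Otherwise return the group names recorded for the chosen environment.
    return ENV_RESOURCE_GROUPS.get(int(control_value), [])
```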
Return the initialized output formatter based upon the configuration. | def initialize_formatter(config):
if config.json: # pylint: disable=R1705
return formatters.JsonFormatter()
elif config.severity: # pylint: disable=R1705
return formatters.SeverityFormatter(config.colored)
return formatters.Formatter(config.colored) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_formatter(self):\n return SectionedFormatter(\n sections=self.sections,\n width=self.terminal_width,\n max_width=self.max_content_width,\n )",
"def set_formatter_string(config: dict):\n formatter_str = \"%(levelname)s %(name)s\"\n\n if config.get(\"formatter\"):\n return config[\"formatter\"]\n\n if config.get(\"extended\"):\n formatter_str += \".%(funcName)s():\"\n\n if config.get(\"timestamp\"):\n formatter_str = \"%(asctime)s \" + formatter_str\n\n formatter_str += \" %(message)s\"\n\n return formatter_str",
"def get_formatter(style):\n if style == 'authoryear':\n return AuthorYearFormatter\n return AuthorYearFormatter",
"def get_format(self):\n format = QtGui.QTextCharFormat()\n\n # Set foreground color\n if self.foreground_color is not None:\n color = self.color_map[self.foreground_color][self.intensity]\n format.setForeground(QtGui.QColor(color))\n\n # Set background color\n if self.background_color is not None:\n color = self.color_map[self.background_color][self.intensity]\n format.setBackground(QtGui.QColor(color))\n\n # Set font weight/style options\n if self.bold:\n format.setFontWeight(QtGui.QFont.Bold)\n else:\n format.setFontWeight(QtGui.QFont.Normal)\n format.setFontItalic(self.italic)\n format.setFontUnderline(self.underline)\n\n return format",
"def set_formatter(self, enable_time=False, enable_msg_count=True):\n self.formatter = logging.Formatter(\n '{}%(name)s - %(levelname)s - %(message)s'.format(\n \"%(asctime)s - \" if enable_time else \"\",\n ))\n for handler in self.handlers:\n handler.setFormatter(self.formatter)\n return self.formatter",
"def format( self ) :\n\n return( self.__format )",
"def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''",
"def _config_formatter(self):\n filter = int( self.markup_filter )\n if filter == self.MARKUP_MARKDOWN:\n return { 'filter_name':'markdown' }\n elif filter == self.MARKUP_MARKDOWN_CODEHILITE:\n return { 'filter_name' : 'markdown',\n 'extensions' : [ 'codehilite' ] }\n elif filter == self.MARKUP_REST:\n return { 'filter_name':'restructuredtext' }\n elif filter == self.MARKUP_TEXTILE:\n return { 'filter_name' : 'textile' }\n else:\n raise ValueError( 'Invalid option for Entry.markup_filter' )",
"def set_formatter(self, formatter):\n self.format = formatter",
"def get_format(cls):\n return cls._format",
"def set_parser(self, output_format):\n self.output_parser = output_parsers.get(output_format, lambda x:x)",
"def init_logging(log_format: str='default', level: str='INFO') -> Union[DefaultFormatter, DebugFormatter]:\n stream_handler = logging.StreamHandler()\n if log_format == 'default':\n formatter = DefaultFormatter\n elif log_format == 'human':\n formatter = DebugFormatter\n else:\n raise ValueError('Unrecognized Format: {}'.format(log_format))\n stream_handler.setFormatter(formatter())\n ROOT_LOGGER.addHandler(stream_handler)\n ROOT_LOGGER.setLevel(level)\n return formatter",
"def getLogFormat(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FORMAT_KEY)",
"def format(self) -> pulumi.Output[Optional['outputs.FlowLogFormatParametersResponse']]:\n return pulumi.get(self, \"format\")",
"def get_formatter(name):\n\n # Is it already in the path?\n try:\n return import_module('pycclone.formatters.' + name)\n except ImportError:\n pass\n\n # Import from user folder\n fpath = os.path.join(USERDIR, 'formatters', name, name + '.py')\n return load_source('pycclone.formatters.' + name, fpath)",
"def get_format(self):\n pass",
"def output(self, formatter, **overrides):\n return self.where(output=formatter, **overrides)",
"def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT",
"def format(self):\n return self._format",
"def get_formatter(self, group):\n return getattr(self, \"format_\" + group + \"_standings\")",
"def default_formatter(self, data):\n return data",
"def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class",
"def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class",
"def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])",
"def get_format_table(self):\n try:\n with open(self._config.values['format'], 'r') as f:\n return f.read()\n except:\n return None",
"def define_log_renderer(self):\n # it must accept a logger, method_name and event_dict (just like processors)\n # but must return the rendered string, not a dictionary.\n # TODO tty logic\n if self.args.log_format == \"json\":\n return structlog.processors.JSONRenderer()\n\n if self.args.log_format == \"pretty\":\n return structlog.dev.ConsoleRenderer()\n\n if self.args.log_file is not None:\n return structlog.processors.JSONRenderer()\n\n if sys.stderr.isatty() and not self.args.quiet:\n return structlog.dev.ConsoleRenderer()\n\n return structlog.processors.JSONRenderer()",
"def initFormat(self):\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList\n if sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '",
"def active_figure_format_config(self):\n if self.ui.active_figure_format_config_widget is None:\n return None # No active override config\n else:\n # Otherwise we have a config widget:\n figure_format_config = self.ui.active_figure_format_config_widget.figure_format_config\n return figure_format_config",
"def getFormatManager(self) -> ghidra.app.util.viewer.format.FormatManager:\n ...",
"def uctt_plugin_factory_output_config(\n environment: Environment, instance_id: str = ''):\n return OutputCliPlugin(environment, instance_id)"
] | [
"0.6507591",
"0.59741753",
"0.5937114",
"0.5926568",
"0.5859191",
"0.58575606",
"0.5845092",
"0.5838448",
"0.5783071",
"0.57651764",
"0.57422423",
"0.5739221",
"0.5724411",
"0.56877214",
"0.56793237",
"0.56695",
"0.5667886",
"0.566292",
"0.56339353",
"0.56309354",
"0.56209934",
"0.55973196",
"0.55973196",
"0.55743825",
"0.55599713",
"0.5544095",
"0.5518256",
"0.5431601",
"0.54015267",
"0.53977734"
] | 0.74388397 | 0 |
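The formatter selection in this row is a plain configuration-driven dispatch. The self-contained sketch below uses stand-in formatter classes (the real `formatters` module is not shown in the dataset, so these class bodies are assumptions) and a `SimpleNamespace` config to show which branch wins.

```python
from types import SimpleNamespace


class Formatter:
    def __init__(self, colored=False):
        self.colored = colored


class SeverityFormatter(Formatter):
    pass


class JsonFormatter:
    pass


def initialize_formatter(config):
    # JSON output takes precedence, then severity-annotated output;
    # a plain (optionally colored) formatter is the fallback.
    if config.json:
        return JsonFormatter()
    if config.severity:
        return SeverityFormatter(config.colored)
    return Formatter(config.colored)


config = SimpleNamespace(json=False, severity=True, colored=True)
print(type(initialize_formatter(config)).__name__)  # SeverityFormatter
```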
Returns the sorted list of problems. | def sort_problems(problems):
# Note: sort() doesn't return the sorted list; rather, it sorts the list
# in place
problems.sort(
key=lambda problem: (
problem.filename,
problem.linenumber,
problem.rule.id
)
)
return problems | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def problem_list(self):\r\n return [{\r\n 'location': location, 'problem_name': name,\r\n 'num_graded': self.DUMMY_DATA['problem_list_num_graded'],\r\n 'num_pending': self.DUMMY_DATA['problem_list_num_pending'],\r\n 'num_required': self.DUMMY_DATA['problem_list_num_required']\r\n } for location, name in self.problems.items()\r\n ]",
"def get_problems(self):\n\n with self.__orm.session_scope() as session:\n try:\n problems = session.query(Problem.name).all()\n return [problem[0] for problem in problems]\n except NoResultFound:\n return []",
"def problems(self):\n return self.configuration.problems",
"def getPossibilities(self):\n \n return sorted(self._possibilities)",
"def get_all(self):\n return gnome_sort(self.__assignments, sort_function=lambda assignment_a, assignment_b: assignment_a.get_assignment_id() <= assignment_b.get_assignment_id())",
"def get_problems():\n problems = list()\n solved = database.session.query(Submission).\\\n filter(Submission.username == current_user.username).\\\n filter(Submission.result == \"good\").\\\n all()\n solved_set = set()\n for solve in solved:\n solved_set.add(solve.pid)\n\n for problem in database.session.query(Problem).all():\n problems.append({\n 'pid': problem.pid,\n 'name': problem.name,\n 'shortname': problem.shortname,\n 'appeared': problem.appeared,\n 'difficulty': problem.difficulty,\n 'comp_release': problem.comp_release,\n 'added': problem.added,\n 'solved': problem.pid in solved_set,\n 'url': url_for_problem(problem)\n })\n return serve_response(problems)",
"def sort_solutions(self, solutions):\r\n if self.breeding_rules.sorting_order is ScoresSortingOrder.ASCENDING:\r\n reverse = False\r\n else:\r\n reverse = True\r\n return sorted(solutions, reverse=reverse, key=lambda solution: solution.score)",
"def get_sorted_disciplines(self):\n results = self.__create_discipline_and_grade_dto()\n results.sort(self.__compare_dtos_on_grade)\n return results",
"def getPriorityList(self):",
"def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result",
"def _get_problem_list(self):\r\n self._success_response({'problem_list': self.server.problem_list})",
"def sorted(x) -> List:\n pass",
"def list_unique_problems(arn=None, nextToken=None):\n pass",
"def sort(self):\n self.notes.sort()",
"def displaySorted(self):\r\n os.system('cls')\r\n for i in self.sortedList:\r\n print(str(i[2]) + \": \" + i[0].showRule())",
"def tiles_by_tissue_percentage(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.tissue_percentage, reverse=True)\n return sorted_list",
"def organizeM():\n scores = []\n today_listM = strainer('name', 'sort', 'event')\n today_listM.extend(strainer('name', 'sort', 'todo'))\n data = list(today_listM)\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores",
"def sorted_carnivores(self):\n fitness_dict = {carn: carn.fitness for carn in self.carnivores}\n sorted_tuples = dict(sorted(fitness_dict.items(), key=lambda x: x[1], reverse=True))\n\n return list(sorted_tuples.keys())",
"def print_sorted(self):\n new_list = []\n for x in sorted(self):\n new_list.append(x)\n print(new_list)",
"def display_problems():\n\n res = choose_problems()\n\n cc_name1 = res[0][0]\n url_link1 = res[0][1]\n cc_name2 = res[1][0]\n url_link2 = res[1][1]\n cc_name3 = res[2][0]\n url_link3 = res[2][1]\n\n #TODO: implement datetime (i.e. \"11.07.21\")\n print('Weekly Wednesday Problems')\n print(f'Problem 1: {cc_name1} - {url_link1}')\n print(f'Problem 2: {cc_name2} - {url_link2}')\n print(f'Problem 3: {cc_name3} - {url_link3}')\n\n return cc_name1, url_link1, cc_name2, url_link2, cc_name3, url_link3",
"def sorted(self): \n pass",
"def printSolutions(self):\n\t\tprint \"Computing solutions...\"\n\t\t\n\t\tsolutions = self.problem.getSolutions()\n\t\tnumberOfSolutions = len(solutions)\n\t\t\n\t\tfor i, solution in enumerate(solutions):\n\t\t\titems = solution.items()\n\t\t\t# sort by time\n\t\t\titems.sort(lambda a, b: cmp(a[1], b[1]))\n\t\t\t# sort by order\n\t\t\titems.sort(lambda a, b: cmp(a[0][0], b[0][0]))\n\t\t\t\n\t\t\tprint \"Solution number\", i + 1\n\t\t\t\n\t\t\ti = 1\n\t\t\tfor j in items:\n\t\t\t\tif j[0][0:1] != str(i):\n\t\t\t\t\tif \"enter\" in j[0] or \"finish\" in j[0]:\n\t\t\t\t\t\tprint j,\n\t\t\t\telse:\n\t\t\t\t\tprint \"\\n\",\n\t\t\t\t\tprint \"Order no:\", i\n\t\t\t\t\tif \"enter\" in j[0] or \"finish\" in j[0]:\n\t\t\t\t\t\tprint j,\n\t\t\t\t\ti += 1\n\t\t\tprint \"\\n==============================================\\n\",\n\t\tprint \"Number of solutions:\", numberOfSolutions\n\t\treturn solutions, numberOfSolutions",
"def get_conflicts(self):\n return []",
"def cheat(self) -> List[str]:\n all_possible_words = self.trie.get_all_possible_words(\n self.get_current_reels_letters()\n )\n better_words = OrderedDict()\n for word in all_possible_words:\n score = self.scorer.calculate_word_score(word)\n if len(better_words) > 2:\n first_word = next(iter(better_words.items()))\n if first_word[0] < score:\n better_words.popitem(last=False)\n better_words[score] = word\n else:\n better_words[score] = word\n better_words = OrderedDict(sorted(better_words.items()))\n return [f\"{word} ({score})\" for score, word in better_words.items()]",
"def get_all_sorted(self):\n self.sort_and_reduce()\n return self.data",
"def get_listu_postaja(self):\n popis = sorted(list(self.postaje))\n return popis",
"def get_assignments_ordered() -> list:\n user_assignments = Assignment.query.all() if current_user.is_admin else current_user.classgroup.assignments\n open_assignments = [x for x in user_assignments if not x.expired]\n closed_assignments = [x for x in user_assignments if x.expired]\n return open_assignments + closed_assignments",
"def sort_results(self):\n pass",
"def issues(self) -> List[IssueType]:\n return [IssueType.FREE_SPACE]",
"def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]"
] | [
"0.6703581",
"0.6595729",
"0.62168723",
"0.6204288",
"0.6180413",
"0.61678904",
"0.6021558",
"0.5934097",
"0.5893262",
"0.58796406",
"0.58487594",
"0.5641037",
"0.5635462",
"0.5628445",
"0.5586737",
"0.5583212",
"0.54723084",
"0.5472102",
"0.547204",
"0.5451602",
"0.54358476",
"0.541411",
"0.54058313",
"0.5405021",
"0.5364568",
"0.5360199",
"0.5359589",
"0.5339819",
"0.53395486",
"0.53188103"
] | 0.75329185 | 0 |
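`sort_problems` sorts in place by a `(filename, linenumber, rule.id)` key tuple and then returns the same list. A quick usage sketch with hypothetical `Problem`/`Rule` stand-ins shows the resulting order:

```python
from collections import namedtuple

Rule = namedtuple("Rule", "id")
Problem = namedtuple("Problem", "filename linenumber rule")


def sort_problems(problems):
    # list.sort() returns None; the list itself is reordered in place.
    problems.sort(key=lambda p: (p.filename, p.linenumber, p.rule.id))
    return problems


problems = [
    Problem("b.yml", 3, Rule("R002")),
    Problem("a.yml", 10, Rule("R001")),
    Problem("a.yml", 2, Rule("R003")),
]
print([(p.filename, p.linenumber) for p in sort_problems(problems)])
# [('a.yml', 2), ('a.yml', 10), ('b.yml', 3)]
```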
Returns the name of the class attribute to be used for classification. | def get_class_attribute(self):
return self.class_attr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name(self) -> str:\n return self.class_names[self.class_num]",
"def get_attribute_class(self, attr_name):\n return self.attrs.get_attribute_class(attr_name)",
"def class_name(self) -> str:\n return pulumi.get(self, \"class_name\")",
"def get_attribute_class(self):\n return self._attr_cls",
"def class_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"class_name\")",
"def className(self):\n namevalue = self.__class__.__name__\n return str(namevalue)",
"def get_attribute_class(self, name):\n self.validate_attribute_name(name)\n return self.schema[name].get_attribute_class()",
"def get_class_name(self):\n\n if \"class\" in self._root.attrib:\n return self._root.attrib['class']\n else:\n return self._root.tag",
"def f_get_class_name(self):\n return self.__class__.__name__",
"def class_attribute(self):\n\n return getattr(self.parent.class_, self.key)",
"def name(self):\n return self._get_device_class_name()",
"def __id_attr_name(self):\n return self._attr_name()",
"def class_name(self) -> str:\n return self.__class__.__name__",
"def AttributeString(self) -> str:",
"def AttributeString(self) -> str:",
"def class_name(cls):\n return cls.__name__",
"def class_name(cls):\n return cls.__name__",
"def getClassName(self):\n n = type(self).__name__\n return n",
"def name_to_label(self, name):\n\t\treturn self.classes[name]",
"def name_to_label(self, name):\n\t\t\treturn self.classes[name]",
"def get_attr_name(self, instance):\n if not hasattr(self, '_attr_name'):\n cls = type(instance)\n self._attr_name = get_descriptor_attr_name(self, cls)\n\n assert self._attr_name is not None, (\n 'Could not find the attribute for %r on %r' % (self, cls))\n\n return self._attr_name",
"def classname(cls):\n return cls.__name__.lower()",
"def name_to_label(self, name):\n return self.classes[name]",
"def attribute_key(self) -> str:\n return pulumi.get(self, \"attribute_key\")",
"def _get_classname(cls):\n return cls.__name__",
"def attribute_name(name: str) -> str:\n return text.snake_case(utils.safe_snake(name))",
"def type_name(attr_type: AttrType) -> str:\n return attr_type.native_name or class_name(attr_type.name)",
"def classifier(self):\n return self.config.get('classifier', \"general\")",
"def getCustomAttribute(self):\n\t\treturn self.Attribute",
"def name(cls) -> str:\n return cls.__name__ # type: ignore[attr-defined]"
] | [
"0.7220845",
"0.7191606",
"0.7166631",
"0.70328903",
"0.6854248",
"0.68273634",
"0.68061316",
"0.6639919",
"0.66164047",
"0.65259147",
"0.64209676",
"0.64048225",
"0.63896745",
"0.637577",
"0.637577",
"0.6366623",
"0.6366623",
"0.63627464",
"0.63504124",
"0.63288695",
"0.63267916",
"0.63187",
"0.63071144",
"0.6285846",
"0.62599534",
"0.62588763",
"0.6222137",
"0.6205986",
"0.6203427",
"0.61956066"
] | 0.7351364 | 0 |
Returns the value used in the dataset to indicate the positive classification choice. | def get_positive_class_val(self, tag):
# FIXME this dependence between tags and metadata is bad; don't know how to fix it right now
if tag == 'numerical-binsensitive':
return 1
else:
return self.positive_class_val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __value_of(sentiment):\n if sentiment == 'positive': return 1\n if sentiment == 'negative': return -1\n return 0",
"def get_prediction_from_score(score):\n if(score >= 0.03):\n return 'Positive'\n elif(score <= -0.03):\n return 'Negative'\n else:\n return 'Neutral'",
"def predict(self,X):\n if (int(self.classifier.predict(self.scaler.transform(X)))==-1):\n return \"popular\"\n else:\n return \"unpopular\"",
"def classify(self,X):\n return int(self.classifier.predict(self.scaler.transform(X)))",
"def classify(self,X):\n return int(self.classifier.predict(self.scaler.transform(X)))",
"def classify(cls, i):\r\n # language_model \r\n if i[2] == None:\r\n return 1\r\n elif (float(i[2])) <= -7.848941176618522:\r\n return 0\r\n else:\r\n return 1",
"def classify(cls, i):\r\n # language_model \r\n if i[2] == None:\r\n return 1\r\n elif (float(i[2])) <= -8.357419966171143:\r\n return 1\r\n else:\r\n return 0",
"def get_value(self):\r\n return 0",
"def classification(self) -> 'outputs.CaseClassificationResponse':\n return pulumi.get(self, \"classification\")",
"def negative_predictive_value(y_true, y_pred):\n\n cm = confusion_matrix(y_true, y_pred)\n return cm[0,0] / cm[:,0].sum()",
"def predict(self,X):\n if (int(self.classifier.predict(self.scaler.transform(X)))==1):\n return \"increase\"\n elif (int(self.classifier.predict(self.scaler.transform(X)))==0):\n return \"keep\"\n else:\n return \"decrease\"",
"def purity_test(self):\n mean = filter_data(self.data,self.ancestors)['Class'].mean()\n if mean == 0:\n return 0\n elif mean == 1:\n return 1\n return None",
"def get_classification(self, idx):\n if idx in self.min_indices:\n return \"minimum\"\n elif idx in self.max_indices:\n return \"maximum\"\n return \"regular\"",
"def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1",
"def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1",
"def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1",
"def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1",
"def getDecision(self):\n return self.decision",
"def getlabel(scores):\n main_score = scores['compound']\n if main_score > 0.1:\n return 'pos'\n elif main_score < -0.1:\n return 'neg'\n else:\n return 'neu'",
"def classify(some_string, trained_pos, trained_neg):\n pos_probability = get_probability(trained_pos, some_string)\n neg_probability = get_probability(trained_neg, some_string)\n if (pos_probability >= neg_probability):\n return \"positive\"\n elif pos_probability < neg_probability: \n return \"negative\"",
"def predict(self, X):\n res = self.predict_proba(X)\n positive_mask = res >= 0.5\n negative_mask = res < 0.5\n res[positive_mask] = self.POSITIVE_CLASS\n res[negative_mask] = self.NEGATIVE_CLASS\n return res",
"def GetValue(self):\n \n return self.choices[self.selected].GetLabel()",
"def predict(self, X):\r\n \r\n # To speed up, we apply the scoring function to all the instances\r\n # at the same time.\r\n scores = X.dot(self.w)\r\n \r\n # Create the output array.\r\n # At the positions where the score is positive, this will contain\r\n # self.positive class, otherwise self.negative_class.\r\n out = numpy.select([scores>=0.0, scores<0.0], [self.positive_class, \r\n self.negative_class])\r\n return out",
"def predict_category(self):\n pass",
"def __neg__(self):\n if self.value == NEG:\n return TRIT_POS\n elif self.value == POS:\n return TRIT_NEG\n else:\n return TRIT_ZERO",
"def value(self):\n return 0",
"def sensitivity(self):\n return self.recall",
"def classification_score(self, x, y):\t\n\t\tpass",
"def classify_snp_prediction(row):\n\n if row['true_snp'] == True and row['prediction'] == True:\n return 'true_positive'\n elif row['true_snp'] == False and row['prediction'] == True:\n return 'false_positive'\n elif row['true_snp'] == False and row['prediction'] == False:\n return 'true_negative'\n else:\n return 'false_negative' # can occur here because we check against prediction, not calls this time",
"def predict(self, X):\r\n return 1 if self.predict_prob(X) > 0.5 else 0"
] | [
"0.6920214",
"0.6468132",
"0.6377437",
"0.6298673",
"0.6298673",
"0.6276346",
"0.62528133",
"0.62347436",
"0.6230235",
"0.61446166",
"0.61428374",
"0.6095599",
"0.60748625",
"0.6050616",
"0.6050616",
"0.6050616",
"0.6050616",
"0.6029829",
"0.60096",
"0.5973834",
"0.5958873",
"0.5956844",
"0.59472567",
"0.5940582",
"0.5926571",
"0.5919561",
"0.590596",
"0.5897776",
"0.5891463",
"0.588618"
] | 0.73992556 | 0 |
Returns a list of the names of any sensitive / protected attribute(s) that will be used for a fairness analysis and should not be used to train the model. | def get_sensitive_attributes(self):
return self.sensitive_attrs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)\n return filter",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def keys(self):\n return list(s.name.lower() for s in self.attributes)",
"def get_attribute_names(cls):\n return cls._attributes.keys()",
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def keys(self):\n return [a.name for a in self.__attrs_attrs__]",
"def _getAttributeNames(self):\n return sorted(self._field_map.keys())",
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def get_attributes(cls):\r\n return [\r\n Attribute(\"show_correctness\", \"always\"),\r\n Attribute(\"submitted_message\", \"Answer received.\"),\r\n Attribute(\"label\", \"\"),\r\n ]",
"def get_privileged_class_names(self, tag):\n # FIXME this dependence between tags and privileged class names is bad; don't know how to\n # fix it right now\n if tag == 'numerical-binsensitive':\n return [1 for x in self.get_sensitive_attributes()]\n else:\n return self.privileged_class_names",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def attribute_restrictions(self):\n return self._attribute_restrictions",
"def attributes(self) -> Set[str]:\n return set()",
"def get_sensitive_attributes_with_joint(self):\n if len(self.get_sensitive_attributes()) > 1:\n return self.get_sensitive_attributes() + ['-'.join(self.get_sensitive_attributes())]\n return self.get_sensitive_attributes()",
"def keys(self):\n\n keys = list()\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n # Ignore parameters with __ and if they are methods\n continue\n\n keys.append(attribute_name)\n\n return keys",
"def all(self):\r\n return self.attr_dict.keys()",
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def get_attributes(cls):\r\n return []",
"def getAttributes(self):\n pass",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def attributes(self):\n return self.__dict.keys()",
"def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes",
"def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_",
"def keys(self):\n\n return self._uniforms.keys() + self._attributes.keys()",
"def _get_all_attributes(self):\n\n attributes= []\n for shader in self._verts:\n attributes.extend(shader.attributes)\n # No attribute in fragment shaders\n attributes = list(set(attributes))\n return attributes",
"def listglobal(self):\n return list(self.attributes.keys())",
"def filter_attributes_choices(self): \n filter_attributes = [\n 'no filters',\n 'user_id',\n 'device_id',\n 'device_first_seen_ts',\n 'device_first_view_ts', \n 'platform',\n 'platform_type',\n 'country',\n 'region',\n 'city',\n 'dma',\n 'os',\n 'os_version',\n 'manufacturer',\n 'app_mode',\n 'app_version',\n 'device_language',\n 'content_id',\n 'program_id',\n 'content_type',\n 'tvt_sec' # note: here tvt_sec is treated as an attribute rather than a cumulative metric\n ]\n return filter_attributes"
] | [
"0.70694345",
"0.66682184",
"0.6661344",
"0.66439575",
"0.66118485",
"0.654151",
"0.6416619",
"0.6314357",
"0.63003594",
"0.626828",
"0.6239923",
"0.62366736",
"0.62366736",
"0.6221969",
"0.6175809",
"0.61643624",
"0.61223054",
"0.6114993",
"0.61009276",
"0.6096935",
"0.6096455",
"0.60956854",
"0.60580903",
"0.604152",
"0.604023",
"0.603697",
"0.5994728",
"0.5976761",
"0.59743744",
"0.5970619"
] | 0.7331742 | 0 |
Same as get_sensitive_attributes, but also includes the joint sensitive attribute if there is more than one sensitive attribute. | def get_sensitive_attributes_with_joint(self):
if len(self.get_sensitive_attributes()) > 1:
return self.get_sensitive_attributes() + ['-'.join(self.get_sensitive_attributes())]
return self.get_sensitive_attributes() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sensitive_attributes(self):\n return self.sensitive_attrs",
"def get_sensitive_terms(self):\n sensitive_terms_dict = {}\n for attribute in self.__non_redundant_entity_attributes:\n for record_id, sensitive_terms in self.__df[attribute].dropna().iteritems():\n for sensitive_term in sensitive_terms:\n cleaned_sensitive_term = \" \".join([t.lemma_.lower() for t in sensitive_term if not t.is_stop])\n if len(cleaned_sensitive_term) > 0:\n sensitive_terms_dict.setdefault(attribute, {}).setdefault(cleaned_sensitive_term, set()).add(record_id)\n\n # Sort sensitive terms dict alphabetically to have a deterministic order\n sensitive_terms_dict = {el[0]: el[1] for el in sorted(sensitive_terms_dict.items(), key=lambda x: x)}\n\n # Sort sensitive terms dict ascending by number terms per entity type\n sensitive_terms_dict = {el[0]: el[1] for el in sorted(sensitive_terms_dict.items(), key=lambda x: len(x[1]))}\n\n for attribute, sensitive_terms in sensitive_terms_dict.items():\n word = \"terms\"\n if len(sensitive_terms) == 1:\n word = \"term\"\n logger.info(\"Found %d distinct sensitive %s within attribute %s\", len(sensitive_terms), word, attribute)\n return sensitive_terms_dict",
"def _get_include_attributes(cls, args, extra_attributes=None):\n extra_attributes = extra_attributes or []\n\n include_attributes = []\n\n if extra_attributes:\n include_attributes.extend(extra_attributes)\n\n # If user specifies which attributes to retrieve via CLI --attr / -a argument, take that\n # into account\n\n # Special case for \"all\"\n if \"all\" in args.attr:\n return None\n\n for attr in args.attr:\n include_attributes.append(attr)\n\n if include_attributes:\n return include_attributes\n\n display_attributes = getattr(cls, \"display_attributes\", [])\n\n if display_attributes:\n include_attributes += display_attributes\n\n include_attributes = list(OrderedSet(include_attributes))\n\n return include_attributes",
"def get_redundant_entity_attributes(self):\n return self.__redundant_entity_attributes",
"def _filtered_attributes(\n self, required_attributes: Union[Iterable, Literal[\"__all__\"]], dontformat=False\n ) -> Tuple[Dict, Dict]:\n if required_attributes == \"__all__\":\n required_attributes = self.__atomic_fields_set__ | {\"meta\"}\n required_attributes = set(required_attributes)\n errors = []\n attrs = {name: getattr(self, name, None) for name in required_attributes-{\"meta\"}}\n for name in required_attributes - {\"meta\"}:\n if name not in self.__atomic_fields_set__:\n errors.append(f\" Unexpected required attribute: '{name}'.\")\n continue\n if attrs.get(name) is None:\n if not utils.is_an_optional_type_hint(self.__fields_types__[name]):\n errors.append(f\" Missing required attribute: '{name}'.\")\n if errors:\n raise ValueError(\"\\n\" + \"\\n\".join(errors))\n attrs = {\n utils.snake_to_camel_case(k, dontformat): v\n for (k, v) in attrs.items()\n if k in set(required_attributes) - self._identifier_fields\n }\n meta_attrs = {\n utils.snake_to_camel_case(name, dontformat): getattr(self, name)\n for name in self.__meta_attributes__\n if getattr(self, name) is not None\n } if \"meta\" in required_attributes else None\n return attrs, meta_attrs",
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def extra_state_attributes(self) -> dict[str, Any]:\n return self._attributes",
"def extra_state_attributes(self):\n return {attr: getattr(self, '_' + prop)\n for attr, prop in ATTRIBUTES_IRHVAC.items()}",
"def extra_state_attributes(self) -> dict[str, Any]:\n ret = {\n ATTR_SOURCE: self._source_entity_id,\n ATTR_COEFFICIENTS: self._coefficients,\n }\n if self._source_attribute:\n ret[ATTR_SOURCE_ATTRIBUTE] = self._source_attribute\n return ret",
"def extra_from_record(self, record):\n return {\n attr_name: record.__dict__[attr_name]\n for attr_name in record.__dict__\n if attr_name not in BUILTIN_ATTRS\n }",
"def attributes(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"attributes\")",
"def extra_state_attributes(self):\n return self._attributes",
"def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs",
"def extra_state_attributes(self):\r\n return self._attributes",
"def localized_attributes(self) -> Optional[Mapping[str, Mapping[str, str]]]:\n return pulumi.get(self, \"localized_attributes\")",
"def senate_attribs(self):\n return self.senate_votes[4]",
"def extra_state_attributes(self):\n return self._attributes",
"def extra_state_attributes(self):\n return {ATTR_DEVICE: \"SKYBEACON\", ATTR_MODEL: 1}",
"def extra_state_attributes(self):\n return {ATTR_DEVICE: \"SKYBEACON\", ATTR_MODEL: 1}",
"def get_non_redundant_entity_attributes(self):\n return self.__non_redundant_entity_attributes",
"def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")",
"def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")",
"def sensitive(self, value: object) -> Union[sensitive, object]:\n return wrap_sensitive_value(value)",
"def extra_state_attributes(self):\n attr = self._attributes\n if self.tesla_device.has_battery():\n attr[ATTR_BATTERY_LEVEL] = self.tesla_device.battery_level()\n attr[ATTR_BATTERY_CHARGING] = self.tesla_device.battery_charging()\n return attr",
"def _get_feature_attributes(self) -> dict:\n srs = pd.Series(dir(self))\n srs = srs[\n (~srs.str.startswith('_'))\n & (~srs.str.contains('as_'))\n & (srs != 'putin')\n & (srs != 'takeout')\n & (srs != 'intermediate_accesses')\n & (srs != 'geometry')\n & (srs != 'has_a_point')\n & (srs != 'centroid')\n ]\n srs = srs[srs.apply(lambda p: not hasattr(getattr(self, p), '__call__'))]\n return {key: getattr(self, key) for key in srs}",
"def extra_state_attributes(self):\n state_attr = {}\n if self.vendor_id is not None:\n state_attr[ATTR_VENDOR_ID] = self.vendor_id\n state_attr[ATTR_VENDOR_NAME] = self.vendor_name\n if self.type_id is not None:\n state_attr[ATTR_TYPE_ID] = self.type_id\n state_attr[ATTR_TYPE] = self.type\n if self.physical_address is not None:\n state_attr[ATTR_PHYSICAL_ADDRESS] = self.physical_address\n return state_attr",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def extra_state_attributes(self):\n return {\n ATTR_SOURCE_ENTITY: self._source_entity,\n ATTR_SOURCE_DOMAIN: self._source_domain,\n }",
"def key_attributes(self):\n\n return [level.key for level in self.levels]",
"def extra_state_attributes(self):\n return {ATTR_ATTRIBUTION: ATTRIBUTION}"
] | [
"0.7723388",
"0.61657095",
"0.5457752",
"0.53589123",
"0.5331123",
"0.5112113",
"0.5105332",
"0.51015556",
"0.5088783",
"0.50400466",
"0.5025615",
"0.5014114",
"0.5013227",
"0.49880865",
"0.4979075",
"0.495395",
"0.4940964",
"0.49345672",
"0.49345672",
"0.49008775",
"0.48915753",
"0.48915753",
"0.4867706",
"0.48594257",
"0.48439616",
"0.48341948",
"0.47859508",
"0.4782373",
"0.47820234",
"0.47594118"
] | 0.85016006 | 0 |
Returns a list in the same order as the sensitive attributes list above of the privileged class name (exactly as it appears in the data) of the associated sensitive attribute. | def get_privileged_class_names(self, tag):
# FIXME this dependence between tags and privileged class names is bad; don't know how to
# fix it right now
if tag == 'numerical-binsensitive':
return [1 for x in self.get_sensitive_attributes()]
else:
return self.privileged_class_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)\n return filter",
"def get_sensitive_attributes(self):\n return self.sensitive_attrs",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def keys(self):\n return list(s.name.lower() for s in self.attributes)",
"def keys(self):\n return [a.name for a in self.__attrs_attrs__]",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def keys(self):\n\n keys = list()\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n # Ignore parameters with __ and if they are methods\n continue\n\n keys.append(attribute_name)\n\n return keys",
"def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes",
"def get_attributes(cls):\r\n return []",
"def ListAttributes(self):\n\n print(\"\\n\")\n print(\"Attributes List of: \" + repr(self.__dict__[\"name\"]) + \" - \" + self.__class__.__name__ + \" Instance\\n\")\n self_keys = self.__dict__.keys()\n self_keys.sort()\n for key in self_keys:\n if key != \"name\":\n print(str(key) + \" : \" + repr(self.__dict__[key]))\n # end\n # end\n print(\"\\n\")",
"def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def attributes(self):\n\n return list(self._attributes.values())",
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def key_attributes(self):\n\n return [level.key for level in self.levels]",
"def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist",
"def listglobal(self):\n return list(self.attributes.keys())",
"def _getAttributeNames(self):\n return sorted(self._field_map.keys())",
"def extractAttrs(data):\n\treturn [instance[1:] for instance in data]",
"def attributes(self):\n return self.__dict.keys()",
"def get_attribute_names(cls):\n return cls._attributes.keys()",
"def get_class_attr_list(self, o):\n alist = None # Attributes to store\n ff = None # Load filter function\n for cl in self.classes:\n if isinstance(o, cl):\n alist = self.classes[cl][0]\n ff = self.classes[cl][1]\n break\n if isinstance(o, Block._ComponentDataClass):\n # If you're here you are trying to serialize an element of an\n # indexed block at the top level. We do want to allow that, so\n # we'll pretend it's a block.\n alist = self.classes[Block][0]\n ff = self.classes[Block][1]\n return (alist, ff)",
"def key_attributes(self):\n\n return [level.key for level in self._levels.values()]",
"def get_data_class_attr_list(self, o):\n alist = None # Attributes to store\n ff = None # Load filter function\n for cl in self.data_classes:\n if isinstance(o, cl):\n alist = self.data_classes[cl][0]\n ff = self.data_classes[cl][1]\n break\n return (alist, ff)",
"def all_attributes(self):\n\n attributes = []\n for level in self.levels:\n attributes.extend(level.attributes)\n\n return attributes",
"def getAttributes(self):\n pass"
] | [
"0.70606923",
"0.7041535",
"0.69863814",
"0.6956516",
"0.6759619",
"0.66697687",
"0.66511804",
"0.6639891",
"0.64760447",
"0.638376",
"0.6333704",
"0.632314",
"0.62966156",
"0.61981976",
"0.6196645",
"0.6196645",
"0.6156528",
"0.6151096",
"0.61357003",
"0.6134827",
"0.6129417",
"0.6113867",
"0.60566044",
"0.6043393",
"0.6015978",
"0.60054904",
"0.59888685",
"0.5978978",
"0.5948422",
"0.594416"
] | 0.7225402 | 0 |
Same as get_privileged_class_names, but also includes the joint sensitive attribute if there is more than one sensitive attribute. | def get_privileged_class_names_with_joint(self, tag):
priv_class_names = self.get_privileged_class_names(tag)
if len(priv_class_names) > 1:
return priv_class_names + ['-'.join(str(v) for v in priv_class_names)]
return priv_class_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_privileged_class_names(self, tag):\n # FIXME this dependence between tags and privileged class names is bad; don't know how to\n # fix it right now\n if tag == 'numerical-binsensitive':\n return [1 for x in self.get_sensitive_attributes()]\n else:\n return self.privileged_class_names",
"def get_sensitive_attributes_with_joint(self):\n if len(self.get_sensitive_attributes()) > 1:\n return self.get_sensitive_attributes() + ['-'.join(self.get_sensitive_attributes())]\n return self.get_sensitive_attributes()",
"def get_sensitive_attributes(self):\n return self.sensitive_attrs",
"def protected_classifications(self):\n if self.customer is None:\n return None\n return sorted([b.name for b in self._customer.protected_species if b.deployed])",
"def class_names(self):\n raise NotImplementedError",
"def get_attribute_names(cls):\n return cls._attributes.keys()",
"def _get_include_attributes(cls, args, extra_attributes=None):\n extra_attributes = extra_attributes or []\n\n include_attributes = []\n\n if extra_attributes:\n include_attributes.extend(extra_attributes)\n\n # If user specifies which attributes to retrieve via CLI --attr / -a argument, take that\n # into account\n\n # Special case for \"all\"\n if \"all\" in args.attr:\n return None\n\n for attr in args.attr:\n include_attributes.append(attr)\n\n if include_attributes:\n return include_attributes\n\n display_attributes = getattr(cls, \"display_attributes\", [])\n\n if display_attributes:\n include_attributes += display_attributes\n\n include_attributes = list(OrderedSet(include_attributes))\n\n return include_attributes",
"def classes(attrs):\n return attrs.get('class', '').split()",
"def keys(self):\n return list(s.name.lower() for s in self.attributes)",
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)\n return filter",
"def keys(self):\n return [a.name for a in self.__attrs_attrs__]",
"def get_feature_names(self):\n return [self.__class__.__name__]",
"def get_sensitive_terms(self):\n sensitive_terms_dict = {}\n for attribute in self.__non_redundant_entity_attributes:\n for record_id, sensitive_terms in self.__df[attribute].dropna().iteritems():\n for sensitive_term in sensitive_terms:\n cleaned_sensitive_term = \" \".join([t.lemma_.lower() for t in sensitive_term if not t.is_stop])\n if len(cleaned_sensitive_term) > 0:\n sensitive_terms_dict.setdefault(attribute, {}).setdefault(cleaned_sensitive_term, set()).add(record_id)\n\n # Sort sensitive terms dict alphabetically to have a deterministic order\n sensitive_terms_dict = {el[0]: el[1] for el in sorted(sensitive_terms_dict.items(), key=lambda x: x)}\n\n # Sort sensitive terms dict ascending by number terms per entity type\n sensitive_terms_dict = {el[0]: el[1] for el in sorted(sensitive_terms_dict.items(), key=lambda x: len(x[1]))}\n\n for attribute, sensitive_terms in sensitive_terms_dict.items():\n word = \"terms\"\n if len(sensitive_terms) == 1:\n word = \"term\"\n logger.info(\"Found %d distinct sensitive %s within attribute %s\", len(sensitive_terms), word, attribute)\n return sensitive_terms_dict",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def check_unused_attributes(self):\n all_attrs_read = collections.defaultdict(set)\n\n def _add_attrs(typ, attr_names_read):\n if typ is None:\n return\n all_attrs_read[typ] |= attr_names_read\n for base_cls in typ.__bases__:\n all_attrs_read[base_cls] |= attr_names_read\n if isinstance(typ, type):\n for child_cls in qcore.inspection.get_subclass_tree(typ):\n all_attrs_read[child_cls] |= attr_names_read\n\n for serialized, attrs_read in six.iteritems(self.attributes_read):\n attr_names_read = {attr_name for attr_name, _, _ in attrs_read}\n _add_attrs(self.unserialize_type(serialized), attr_names_read)\n\n for typ, attrs in self.config.IGNORED_UNUSED_ATTRS_BY_CLASS:\n _add_attrs(typ, attrs)\n\n used_bases = tuple(self.config.USED_BASE_CLASSES)\n\n for typ, attrs_read in sorted(\n six.iteritems(all_attrs_read), key=self._cls_sort\n ):\n if self.serialize_type(typ) not in self.classes_examined or issubclass(\n typ, used_bases\n ):\n continue\n existing_attrs = set(typ.__dict__.keys())\n for attr in existing_attrs - attrs_read - self.config.IGNORED_UNUSED_ATTRS:\n # server calls will always show up as unused here\n if _safe_getattr(_safe_getattr(typ, attr, None), \"server_call\", False):\n continue\n print(\"Unused method: %r.%s\" % (typ, attr))",
"def allowed_runtime_class_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"allowed_runtime_class_names\")",
"def _get_base_class_names(frame):\n co, lasti = frame.f_code, frame.f_lasti\n code = co.co_code\n i = 0\n extended_arg = 0\n extends = []\n while i <= lasti:\n c = code[i]\n op = ord(c)\n i += 1\n if op >= dis.HAVE_ARGUMENT:\n oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg\n extended_arg = 0\n i += 2\n if op == dis.EXTENDED_ARG:\n extended_arg = oparg * int(65536)\n if op in dis.hasconst:\n if type(co.co_consts[oparg]) == str:\n extends = []\n elif op in dis.hasname:\n if dis.opname[op] == 'LOAD_NAME':\n extends.append(('name', co.co_names[oparg]))\n if dis.opname[op] == 'LOAD_ATTR':\n extends.append(('attr', co.co_names[oparg]))\n items = []\n previous_item = []\n for t, s in extends:\n if t == 'name':\n if previous_item:\n items.append(previous_item)\n previous_item = [s]\n else:\n previous_item += [s]\n if previous_item:\n items.append(previous_item)\n return items",
"def get_extra(self):\n\t\tselect = []\n\t\tfor cls in range(1, NCLASSES + 1):\n\t\t\tselect.append(where(self.labels == cls)[0][:self.limits[cls - 1]])\n\t\tfilter = concatenate(select)\n\t\treturn self.data[filter, :], self.labels[filter]",
"def relevant_classifications(self):\n return self.relevant_classes",
"def getFeatureClassNames(self):\n return self.featureClasses.keys()",
"def keys(self):\n\n keys = list()\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n # Ignore parameters with __ and if they are methods\n continue\n\n keys.append(attribute_name)\n\n return keys",
"def thrift_attrs(obj_or_cls) -> List[str]:\n return [v[1] for v in obj_or_cls.thrift_spec.values()]",
"def get_cryptomatte_names(self):\n return [self.cryptomattes[x][\"name\"] for x in self.cryptomattes]",
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def class_trait_names ( cls, **metadata ):\n return cls.class_traits( **metadata ).keys()",
"def getAttributes(clazz):\n return {name: attr for name, attr in clazz.__dict__.items()\n if not name.startswith(\"__\") \n and not callable(attr)\n and not type(attr) is staticmethod\n and not type(attr) is classmethod}",
"def _GetSuspectedCLsWithOnlyCLInfo(suspected_cls):\n simplified_suspected_cls = []\n for cl in suspected_cls:\n simplified_cl = {\n 'repo_name': cl['repo_name'],\n 'revision': cl['revision'],\n 'commit_position': cl['commit_position'],\n 'url': cl['url']\n }\n simplified_suspected_cls.append(simplified_cl)\n return simplified_suspected_cls",
"def _detectors_attrs(self):\n if self._det.get('detectors'):\n return self._det['detectors'].keys()\n else:\n return []"
] | [
"0.73706985",
"0.6840645",
"0.6202078",
"0.594918",
"0.5039532",
"0.50019443",
"0.49859846",
"0.49820405",
"0.49799612",
"0.49191874",
"0.49018767",
"0.48560244",
"0.48521727",
"0.48503172",
"0.48184666",
"0.478812",
"0.47643054",
"0.47436982",
"0.47109863",
"0.47010607",
"0.46969378",
"0.4690221",
"0.46895146",
"0.46829548",
"0.4679855",
"0.46590713",
"0.4658538",
"0.46548977",
"0.4650458",
"0.4638773"
] | 0.7298145 | 1 |
A passing grade in the Ricci data is defined as any grade above a 70 in the combined oral and written score. (See Miao 2010.) | def passing_grade(row):
if row['Combine'] >= 70.0:
return 1
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grade(self):\n if round(self.numAvg,0) >= 70:\n return round(self.numAvg,0)\n elif self.PassSummer:\n return 70\n elif round(self.numAvg,0) >= 55 and not self.PassSummer:\n return round(self.numAvg,0)\n else:\n return 55",
"def calc_grade(self, average):\n if 95 <= average:\n return 'S'\n elif 90 <= average:\n return 'A'\n elif 80 <= average:\n return 'B'\n elif 70 <= average:\n return 'C'\n elif 60 <= average:\n return 'D'\n else:\n return 'F'",
"def computeGrades(e1, e2, a):\n \n a = assignmentScores\n a.sort()\n i=0\n while i<10:\n sum+=sum a[i]\n avg = sum/10\n \n grade = ((e1 + e2) /2) * 0.4 + (avg) * 0.6\n \n return grade\n \n if grade >= 90 and grade <= 100:\n return(\"A\")\n \n elif grade >= 80 and grade < 90:\n return(\"B\")\n \n elif grade >= 70 and grade < 80:\n return(\"C\")\n \n elif grade >= 60 and grade < 70:\n return(\"D\")\n \n elif grade < 60:\n return(\"F\")",
"def calculate_gpa(score):\n if score < 60:\n return 0\n elif 60 <= score < 70:\n return 1\n elif 70 <= score < 80:\n return 2\n elif 80 <= score < 90:\n return 3\n elif score >= 90:\n return 4",
"def grade_calculate_grade(self):\n try:\n if int(self.root.ids.grade_input_grade.text) >= 85:\n grade = 'High Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 75:\n grade = 'Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 65:\n grade = 'Credit'\n elif int(self.root.ids.grade_input_grade.text) >= 50:\n grade = 'Pass'\n else:\n grade = 'Fail'\n self.root.ids.grade_output_label.text = 'Grade: ' + grade\n except ValueError:\n\n self.root.ids.grade_output_label.text = 'Invalid Grade'",
"def grade(first, second, third):\n score = round((first + second + third) / 3, 1)\n score_secondary = str(score)\n plusminus = int(score_secondary[1])\n if score > 100:\n return \"Wrong input, friend.\"\n else:\n if score >= 90:\n if plusminus >= 5:\n return \"A+\"\n else:\n return \"A-\"\n elif score >= 80:\n if plusminus >= 5:\n return \"B+\"\n else:\n return \"B-\"\n elif score >= 70:\n if plusminus >= 5:\n return \"C+\"\n else:\n return \"C-\"\n elif score >= 60:\n if plusminus >= 5:\n return \"D+\"\n else:\n return \"D-\"\n else:\n return \"F\"",
"def calc_score(score):\n if not score:\n return 0\n dbot_score = 1\n if score >= 95:\n dbot_score = 3\n elif score >= 75:\n dbot_score = 2\n return dbot_score",
"def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK",
"def get_grade(self) -> int :\n return self.grade",
"def test_grade(grade_list):\n num_grade=scale_grade(grade_list)\n trend=[]\n for ind in range(len(num_grade)-1):\n if num_grade[ind+1]>num_grade[ind]:\n trend.append(1)\n elif num_grade[ind+1]<num_grade[ind]:\n trend.append(-1)\n else:\n trend.append(0)\n \n if sum(trend)>0:\n return 1\n elif sum(trend)<0:\n return -1\n else:\n return 0",
"def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")",
"def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')",
"def grade_for_percentage(grade_cutoffs, percentage):\r\n\r\n letter_grade = None\r\n\r\n # Possible grades, sorted in descending order of score\r\n descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)\r\n for possible_grade in descending_grades:\r\n if percentage >= grade_cutoffs[possible_grade]:\r\n letter_grade = possible_grade\r\n break\r\n\r\n return letter_grade",
"def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3",
"def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')",
"def grade(self, points):\n credit = -1\n while credit > points or credit < 0:\n try:\n credit = int(input(\"\\nScore out of \" + str(points) + \": \"))\n except:\n credit = -1\n if credit != points:\n self.feedback += \"\\n\\t\" + str(raw_input(\"Describe problem: \"))\n return credit",
"def grade_conversion(grade):\n grade_converter = {\"A\": 4.00, \"A-\":3.67, \"B+\": 3.33, \"B\": 3.00, \"B-\": 2.67, \"C+\": 2.33, \"C\": 2.00, \"C-\": 1.67, \"D\": 1.00, \"F\": 0.0}\n while True:\n for val, val2 in grade_converter.items():\n if grade == val:\n return val2",
"def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1",
"def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1",
"def grade_to_gpa(grade):\n\n letter_grade = \"\"\n gpa = 0.0\n\n if type(grade) is str:\n accepted_values = [\"A+\", \"A\", \"A-\", \"B+\", \"B\", \"B-\", \"FZ\"]\n\n # check that the grade is one of the accepted values\n if grade in accepted_values:\n\n # assign grade to letter_grade\n letter_grade = grade\n\n #If grade input is a string, but not an accepted value, raise a ValueError\n else:\n raise ValueError(\"Incorrect value. Grade must be an accepted letter grade.\")\n\n elif type(grade) is int:\n\n # check that grade is in the accepted range 0 to 100\n if 0 <= grade <= 100:\n\n # convert the numeric grade to a letter grade\n mark_to_letter = grade\n\n # assign the value to letter_grade\n # hint: letter_grade = mark_to_letter(grade)\n if mark_to_letter >= 90:\n letter_grade = \"A+\"\n elif mark_to_letter >= 85:\n letter_grade = \"A\"\n elif mark_to_letter >= 80:\n letter_grade = \"A-\"\n elif mark_to_letter >= 77:\n letter_grade = \"B+\"\n elif mark_to_letter >= 73:\n letter_grade = \"B\"\n elif mark_to_letter >= 70:\n letter_grade = \"B-\"\n else:\n letter_grade = \"FZ\"\n\n #If grade input is not in accepted range, raise ValueError\n else:\n raise ValueError(\"Incorrect value. Grade must be in the accepted range of 0 to 100.\")\n else:\n # raise a TypeError exception\n raise TypeError(\"Invalid type passed as parameter\")\n\n # write a long if-statement to convert letter_grade\n # assign the value to gpa\n if letter_grade == \"A+\":\n gpa = 4.0\n if letter_grade == \"A\":\n gpa = 4.0\n if letter_grade == \"A-\":\n gpa = 3.7\n if letter_grade == \"B+\":\n gpa = 3.3\n if letter_grade == \"B\":\n gpa = 3.0\n if letter_grade == \"B-\":\n gpa = 2.7\n if letter_grade == \"FZ\":\n gpa = 0.0\n\n #Return the gpa of the grade\n return gpa",
"def publish_grade(self):\r\n score = self.lcp.get_score()\r\n self.runtime.publish(\r\n self,\r\n 'grade',\r\n {\r\n 'value': score['score'],\r\n 'max_value': score['total'],\r\n }\r\n )\r\n\r\n return {'grade': score['score'], 'max_grade': score['total']}",
"def get_score(self, red_score, blue_score):\n if red_score < blue_score:\n return 0\n elif red_score > blue_score:\n return 1\n else:\n return 0.5",
"def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 0.0",
"def test_grade(self):\r\n # Sample variables x and y in the range [-10, 10]\r\n sample_dict = {'x': (-10, 10), 'y': (-10, 10)}\r\n\r\n # The expected solution is numerically equivalent to x+2y\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=0.01,\r\n answer=\"x+2*y\")\r\n\r\n # Expect an equivalent formula to be marked correct\r\n # 2x - x + y + y = x + 2y\r\n input_formula = \"2*x - x + y + y\"\r\n self.assert_grade(problem, input_formula, \"correct\")\r\n\r\n # Expect an incorrect formula to be marked incorrect\r\n # x + y != x + 2y\r\n input_formula = \"x + y\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")",
"def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)",
"def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average",
"def get_grade(self):\n return self.__grade_value",
"def base_contribute_score():\n return 1",
"def _score_to_decision(self, score):",
"def grade_opbg(score, breakpoints=[0,5,10,20,50,200], grades=list(range(7))):\n i = bisect.bisect_left(breakpoints, score)\n return grades[i]"
] | [
"0.7085485",
"0.6440944",
"0.63578296",
"0.63572264",
"0.6289942",
"0.6267479",
"0.60695726",
"0.59769577",
"0.5956954",
"0.59330213",
"0.5848372",
"0.5821019",
"0.58090127",
"0.58062154",
"0.57650155",
"0.57590044",
"0.5737819",
"0.5693649",
"0.5693649",
"0.5689963",
"0.56800187",
"0.5675765",
"0.56712085",
"0.56561005",
"0.5652554",
"0.5650292",
"0.56423664",
"0.5630729",
"0.5570134",
"0.5562054"
] | 0.7371235 | 0 |
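The `passing_grade` function above is a row-wise predicate over the Ricci score data. A minimal sketch of how it would typically be applied, assuming a pandas DataFrame with a numeric 'Combine' column (the frame below is an invented illustration, not the actual Ricci data):

import pandas as pd

def passing_grade(row):
    # 1 = passing (combined oral + written score of 70 or above), 0 = failing
    return 1 if row['Combine'] >= 70.0 else 0

ricci = pd.DataFrame({'Combine': [58.75, 70.0, 88.4]})   # hypothetical scores
ricci['Pass'] = ricci.apply(passing_grade, axis=1)
print(ricci['Pass'].tolist())   # [0, 1, 1]

Note that the stored code uses `>= 70.0`, so a score of exactly 70 counts as passing even though the query text says "above a 70".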
Given a ZERO-indexed position `pos` on the contig, what is the relative ZERO-indexed nucleotide position within this annotation's coding sequence? | def nt_pos(self, pos):
seq_consumed = 0
if self.coding_blocks is None or len(self.coding_blocks) == 0:
return int(self.end - pos - 1 if self.rev_strand else pos - self.start)
for block in (reversed(self.coding_blocks) if self.rev_strand else self.coding_blocks):
if pos >= block[0] and pos < block[1]:
if self.rev_strand: return (block[1] - pos - 1 + seq_consumed)
else: return (pos - block[0] + seq_consumed)
else:
seq_consumed += block[1] - block[0]
raise RuntimeError("Position %d not within feature %s" % (pos, self.seq_record.name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_offset_pos(seq, pos):\n \n nogap_seq = transform_seq(seq)\n assert(pos >= 0 and pos < len(nogap_seq))\n\n maps = dict()\n cnt = 0\n maxi = 0\n for i in range(len(seq)):\n if seq[i] not in msa_characters:\n maps[i-cnt] = i\n maxi = i\n else:\n cnt += 1\n return maps.get(pos, maxi)\n \n #cnt = 0\n #k = 0\n #while k<len(seq):\n #print(k, cnt, seq[k])\n #offset = 0\n #while k+offset < len(seq) and seq[k+offset] in msa_characters:\n #offset += 1\n #else:\n #cnt += 1\n #k+=offset+1\n #if cnt == pos:\n #break\n #return k\n \n #k = 0 \n #cnt = 0 if seq[k] not in msa_characters else -1\n #while cnt != pos and k < len(seq):\n #if seq[k] not in msa_characters:\n #cnt += 1\n #k += 1 \n ##print(pos, cnt, k, seq)\n #return k",
"def compute_revoffset_pos(seq, pos):\n\n cnt = 0 \n for c in seq:\n if c in msa_characters:\n cnt += 1\n return pos - cnt",
"def _get_indel_pos(self, variant_pos, read):\n hardclipped = 0 if read.cigartuples[0][0] != 5 else read.cigartuples[0][1] # read location must be adjusted for\n # number of hardclipped bases represented in cigar but not in read_seq https://www.biostars.org/p/119537/\n iloc = variant_pos - read.reference_start + read.query_alignment_start - 1 + hardclipped\n return iloc",
"def aa_pos(self, pos):\n return self.nt_pos(pos) // 3",
"def mark_codon_pos(seq, pos=0):\n\n codons = []\n\n for base in seq:\n if base != \"-\":\n codons.append(pos)\n pos = (pos + 1) % 3\n else:\n codons.append(-1)\n\n return codons",
"def seq_positions(seq, codon):\n\n positions = []\n i = 0\n\n while codon in seq[i:]:\n pos = seq.find(codon, i)\n positions.append(pos)\n i = pos + 1\n positions.sort()\n return positions",
"def get_single_location(chrom, pos):\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos",
"def get_transcript_index_from_pos(\n pos,\n transcript_id,\n skip_untranslated_region= True):\n exons = get_exons_from_transcript(transcript_id)\n exons = exons.sort(columns=['seq_region_start_exon', 'seq_region_end_exon'])\n exons['exon_length'] = \\\n exons['seq_region_end_exon'] - exons['seq_region_start_exon'] + 1\n starts = exons['seq_region_start_exon']\n stops = exons['seq_region_end_exon']\n intervals = zip(starts, stops)\n\n transcript_length = exons['exon_length'].sum()\n transcript_idx = get_idx_from_interval(pos, intervals)\n\n if transcript_idx is None:\n logging.warning(\"Couldn't find position %d in transcript %s\",\n pos, transcript_id)\n else:\n # Reverse array index if on reverse strand\n forward = is_forward_strand(transcript_id)\n transcript_idx = transcript_idx if forward else \\\n transcript_length - transcript_idx - 1\n if skip_untranslated_region:\n # Adjust for translations (CDS) start region\n prefix_utr_length = get_five_prime_utr_length(exons, forward)\n if transcript_idx < prefix_utr_length:\n logging.warn(\n \"UTR mutation at cDNA position %d, transcript %s\",\n transcript_idx, transcript_id)\n return None\n else:\n transcript_idx -= prefix_utr_length\n\n # Adjust for CDS start phase if first exon is out of phase\n transcript_phase = get_cds_start_phase(transcript_id)\n transcript_idx += transcript_phase\n if transcript_phase > 0:\n logging.warn(\"Transcript %s is incomplete\", transcript_id)\n\n # TODO: check that index is within the mRNA transcript\n # need to get the length of the coding region from the transcript_id\n #suffix_utr_length = get_three_prime_utr_length(exons, forward)\n #assert transcript_idx <= transcript_length + suffix_utr_length\n\n return transcript_idx",
"def pos(self):\n return (self.raw - self.raw_zero) / self.ratio",
"def position_in_operon(self):\n if self.transcription_units:\n tu_lengths = [len(tu.location) for tu in self.transcription_units]\n longest_tu = self.transcription_units[int(np.argmax(tu_lengths))]\n if longest_tu.location.strand == 1:\n gene_starts = sorted([gene.location.start.position for gene in longest_tu.genes])\n this_gene_start = self.location.start.position\n else:\n gene_starts = sorted([gene.location.end.position for gene in longest_tu.genes])\n gene_starts.reverse()\n this_gene_start = self.location.end.position\n position = np.where(np.array(gene_starts) == this_gene_start)[0][0] + 1\n else:\n position = 1\n return position",
"def seq_2_pos(idx):\n\tglobal SEQ2POS\n\tif idx not in SEQ2POS:\n\t\treturn None\n\tcod = SEQ2POS[idx]\n\treturn (cod&0xFFFF) , (cod>>16)",
"def get_corrected_index(seq,\n aligned_index):\n \n # Counts the number of nucleotides in aligned sequence, returns\n # count of nucleotides occuring before aligned index reached\n slice_seq=seq[0:aligned_index]\n # If different gap characters used, may need to modify this\n # In current form, it is optimized for speed\n corrected_index=\\\n aligned_index - (slice_seq.count(\"-\") + slice_seq.count(\".\"))\n \n\n \n return corrected_index",
"def ind_pos(position, ind, current_geno, chr_starts, chr_ends):\n ind_starts = chr_starts[ind]\n ind_ends = chr_ends[ind]\n #print [position, ind, current_geno, ind_starts, ind_ends]\n in_interval = False\n for interval in range(len(ind_starts)):\n if position > int(ind_starts[interval]) and position < int(ind_ends[interval]):\n in_interval = True\n break\n if in_interval:\n return(current_geno)\n else:\n return(\"./.\")",
"def reconstruct_seq_centered(seq, nucleosome_pos):\n\n # equivalence\n d_nucleotide = {0: 1, # 'A',\n 1: 0, # 'C',\n 2: 1, # 'T',\n 3: 0, # 'G'\n }\n\n seqd = np.vectorize(d_nucleotide.get)(seq)\n array_nuc = []\n\n # select only the nucleosome positions\n for pos in nucleosome_pos:\n array_nuc.append(seqd[pos - 58:pos + 59])\n\n # do the stack\n center_nucleosome = np.sum(array_nuc, axis=0) / len(array_nuc)\n\n return center_nucleosome",
"def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range",
"def get_DNApos_fromcoords(self,x,y):\n\n # Are we close to the DNA sequence?\n if abs(y-self.seq_row)>10:\n return None\n\n # ok, DNA it is\n pos=int(float(x-self.seq_xstart+4.0)/self.base_scale.get())\n return pos",
"def minisat_encode_label(self,clause):\n mini_encoding = clause.minisat_encode()\n positional = self.encoding_positions[abs(mini_encoding)]\n positional += 1\n if clause.compliment:\n positional *= -1\n return positional",
"def get_rewritten_pos(self, pos):\n offset = 0\n for key_pos in self.insertions:\n if key_pos > pos:\n break\n offset += self.insertions[key_pos]\n\n for key_pos in self.removals:\n if key_pos >= pos:\n break\n offset -= self.removals[key_pos]\n\n return max(offset + pos, 0)",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def getStartPos(peptide, seq, subst=None):\n return getStartPosMapper(seq, subst)(peptide)",
"def cpos2codon(self, cpos):\n self.ensure_seq()\n cpos = int(cpos)\n if self.strand == \"+\":\n np = []\n for beg, end in self.exons:\n np += list(range(max(beg, self.cds_beg),\n min(self.cds_end, end)+1))\n assert len(np) == len(self.seq)\n\n ni = cpos*3\n if ni <= len(np):\n codon = Codon()\n codon.index = cpos\n codon.locs = tuple(np[ni-3:ni])\n codon.gene = self.gene\n codon.chrm = self.chrm\n codon.strand = self.strand\n codon.seq = self.seq[ni-3:ni]\n return codon\n else:\n raise IncompatibleTranscriptError('invalid_cDNA_position_%d;expect_[0_%d]' % (ni, len(np)))\n else:\n np = []\n for beg, end in reversed(self.exons):\n np += list(range(min(self.cds_end, end),\n max(beg, self.cds_beg)-1,-1))\n assert len(np) == len(self.seq)\n\n ni = cpos*3\n if ni <= len(np):\n codon = Codon()\n codon.index = cpos\n codon.locs = tuple(reversed(np[ni-3:ni]))\n codon.gene = self.gene\n codon.chrm = self.chrm\n codon.strand = self.strand\n codon.seq = self.seq[ni-3:ni]\n return codon\n else:\n raise IncompatibleTranscriptError('invalid_cDNA_position_%d;expect_[0_%d]' % (ni, len(np)))",
"def get_trans_pos(genome):\n pos = random.randint(100, len(genome.seq)-100) # insert position\n if pos in genome.unavail_pos:\n pos = get_trans_pos(genome)\n return pos",
"def getPosTagAt(self, pos):\n return self.sentence[pos].getPosTag()",
"def decode_pos(pos):\n return pos // 3, pos % 3",
"def tnuc_resolve_pos(self, tnuc_pos_q):\n if tnuc_pos_q.pos < 0:\n return Pos(pos = self.cdslen(), tpos = tnuc_pos_q.tpos)\n return tnuc_pos_q",
"def pos(self):\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]",
"def pos(self):\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]",
"def make_codon_pos_align(aln):\n\n def func(seq):\n dct = {-1: \"-\",\n 0: \"0\",\n 1: \"1\",\n 2: \"2\"}\n return \"\".join(util.mget(dct, mark_codon_pos(seq)))\n return mapalign(aln, valfunc=func)"
] | [
"0.71565324",
"0.70421827",
"0.6502028",
"0.6497681",
"0.64480406",
"0.6386525",
"0.6327573",
"0.63075775",
"0.6233408",
"0.6213303",
"0.62095505",
"0.6183801",
"0.6171228",
"0.6140234",
"0.6121173",
"0.6088656",
"0.60556525",
"0.6051068",
"0.6037078",
"0.6037078",
"0.6037078",
"0.60354495",
"0.59821177",
"0.5978502",
"0.59758204",
"0.59550875",
"0.5943685",
"0.5936246",
"0.5936246",
"0.591546"
] | 0.7121677 | 1 |
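The strand- and block-aware arithmetic in `nt_pos` above is easier to follow on a concrete case. Below is a standalone sketch of the same walk over half-open coding blocks; the coordinates are invented for illustration and the class attributes are replaced by plain arguments:

def nt_pos_sketch(pos, coding_blocks, rev_strand=False):
    # coding_blocks are 0-indexed, half-open (start, end) intervals in contig coordinates
    seq_consumed = 0
    blocks = reversed(coding_blocks) if rev_strand else coding_blocks
    for start, end in blocks:
        if start <= pos < end:
            return (end - pos - 1 + seq_consumed) if rev_strand else (pos - start + seq_consumed)
        seq_consumed += end - start
    raise ValueError("position %d not within any coding block" % pos)

# Two coding blocks of lengths 10 and 12: [100, 110) and [200, 212).
# Contig position 203 sits 3 bases into the second block, so its CDS offset is 10 + 3 = 13.
print(nt_pos_sketch(203, [(100, 110), (200, 212)]))                    # 13
# On the reverse strand the same position is counted from the right edge of its block: 212 - 203 - 1 = 8.
print(nt_pos_sketch(203, [(100, 110), (200, 212)], rev_strand=True))   # 8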
Given an iterable `alts` of nucleotides to be substituted at contig position `pos`, return a list of the corresponding amino acid changes that would occur. `transl_table` is the NCBI genetic code to use when translating the coding sequence. | def aa_alts(self, alts, pos, transl_table=11):
aa_alts = []
nt_pos = self.nt_pos(pos)
aa_pos = self.aa_pos(pos)
for i, allele in enumerate(alts):
mut_seq = str(self.seq_record.seq)
if self.rev_strand:
allele = str(Seq(allele, generic_dna).reverse_complement())
if i == 0 and mut_seq[nt_pos].upper() != allele.upper():
# Sanity check: the reference (first) allele should be the nucleotide at nt_pos!
raise RuntimeError("Ref allele '%s' is incorrect for %s:c.%d" % (allele,
self.seq_record.name, nt_pos + 1))
# pad partial codons for the rare off-length annotations to avoid a BiopythonWarning
mut_seq_pad = "N" * (-len(mut_seq) % 3)
mut_seq = mut_seq[0:nt_pos] + allele + mut_seq[nt_pos+1:None] + mut_seq_pad
mut_seq_aa = str(Seq(mut_seq, generic_dna).translate(table=transl_table))
aa_alts.append(mut_seq_aa[aa_pos])
return aa_alts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_AA_subs(s):\r\n test_seq = s.toseq()[70:217].translate() #Translate the mutated region\r\n substitutions = []\r\n \r\n for i in range(len(test_seq)):\r\n if test_seq[i] != align_temp[i]:\r\n substitutions.append(''.join([str(align_temp[i]),\r\n str(i+48),\r\n str(test_seq[i]),\r\n ' ']))\r\n \r\n return ''.join(substitutions).strip()",
"def get_all_translations(rna_sequence, genetic_code):\n #Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n #get the lengh of RNA seq.\n total_rna_bases=len(rna_sequence)\n #Create an empty list to store all possible AA seq.\n polypeptide_list = []\n #Looping through all the RNA bases, selecting all 3 possible reading frames to scan for tranlation.\n for i in range(total_rna_bases):\n i_end= i +3\n next_three=rna_sequence[i:i_end]\n #Condition to check if the condon is start codon\n if next_three=='AUG':\n #If condition satisfies, translate all rna seq from start to stop codon using first function,\n #translate_sequence\n polypeptide=translate_sequence(rna_sequence[i:], genetic_code)\n polypeptide_list.append(polypeptide)\n #Return all 3 possible reading frames as a list in polypeptide_list\n return polypeptide_list",
"def translate_sequence(rna_sequence, genetic_code):\n #Crate an empty list to store AA sequence:\n AA_list = []\n # Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n # Convert all rna_sequence into a list:\n rna_list = list(rna_sequence)\n # This conditon will run if rna_sequence is at least 3 bases long, and only once it find start codon ,\n #and stop once it finds stop codon.\n while True:\n if len(rna_list) > 2:\n codon=''.join(rna_list[0:3])\n #Delete first 3 bases since its alread added as codon, thus no longer needed.\n del rna_list[0:3]\n else:\n break\n #Using genetic code dictionary to find AA for each corresponding codon:\n AA=genetic_code[codon]\n #Break loop once it finds stop codon\n if AA=='*':\n break\n #Add add translatable AA to the AA_list:\n AA_list.append(AA)\n return ''.join(AA_list)",
"def translate(args):\n from jcvi.utils.cbook import gene_name\n\n transl_tables = [str(x) for x in range(1, 25)]\n p = OptionParser(translate.__doc__)\n p.add_option(\n \"--ids\",\n default=False,\n action=\"store_true\",\n help=\"Create .ids file with the complete/partial/gaps label\",\n )\n p.add_option(\n \"--longest\",\n default=False,\n action=\"store_true\",\n help=\"Find the longest ORF from each input CDS\",\n )\n p.add_option(\n \"--table\",\n default=1,\n choices=transl_tables,\n help=\"Specify translation table to use\",\n )\n p.add_option(\n \"--strip_names\",\n default=False,\n action=\"store_true\",\n help=\"Strip alternative splicing (e.g. At5g06540.1 -> At5g06540)\",\n )\n p.add_option(\n \"--unique\",\n default=False,\n action=\"store_true\",\n help=\"Ensure the output FASTA contains unique identifiers\",\n )\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n strip_names = opts.strip_names\n unique = opts.unique\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (cdsfasta,) = args\n if opts.longest:\n cdsfasta = longestorf([cdsfasta])\n\n f = Fasta(cdsfasta, lazy=True)\n outfile = opts.outfile\n fw = must_open(outfile, \"w\")\n\n if opts.ids:\n idsfile = cdsfasta.rsplit(\".\", 1)[0] + \".ids\"\n ids = open(idsfile, \"w\")\n else:\n ids = None\n\n five_prime_missing = three_prime_missing = 0\n contain_ns = complete = cannot_translate = total = 0\n\n seen = set()\n grand_total = 0\n for name, rec in f.iteritems_ordered():\n grand_total += 1\n\n if strip_names:\n name = gene_name(name)\n\n if unique and name in seen:\n continue\n\n cds = rec.seq\n cdslen = len(cds)\n peplen = cdslen // 3\n total += 1\n\n # Try all three frames\n pep = \"\"\n for i in range(3):\n newcds = cds[i : i + peplen * 3]\n newpep = newcds.translate(table=opts.table)\n if len(newpep.split(\"*\")[0]) > len(pep.split(\"*\")[0]):\n pep = newpep\n\n labels = []\n if \"*\" in pep.rstrip(\"*\"):\n logging.error(\"{0} cannot translate\".format(name))\n cannot_translate += 1\n labels.append(\"cannot_translate\")\n\n contains_start = pep.startswith(\"M\")\n contains_stop = pep.endswith(\"*\")\n contains_ns = \"X\" in pep\n start_ns = pep.startswith(\"X\")\n end_ns = pep.endswith(\"X\")\n\n if not contains_start:\n five_prime_missing += 1\n labels.append(\"five_prime_missing\")\n if not contains_stop:\n three_prime_missing += 1\n labels.append(\"three_prime_missing\")\n if contains_ns:\n contain_ns += 1\n labels.append(\"contain_ns\")\n if contains_start and contains_stop:\n complete += 1\n labels.append(\"complete\")\n if start_ns:\n labels.append(\"start_ns\")\n if end_ns:\n labels.append(\"end_ns\")\n\n if ids:\n print(\"\\t\".join((name, \",\".join(labels))), file=ids)\n\n peprec = SeqRecord(pep, id=name, description=rec.description)\n SeqIO.write([peprec], fw, \"fasta\")\n fw.flush()\n seen.add(name)\n\n print(\n \"Complete gene models: {0}\".format(percentage(complete, total)), file=sys.stderr\n )\n print(\n \"Missing 5`-end: {0}\".format(percentage(five_prime_missing, total)),\n file=sys.stderr,\n )\n print(\n \"Missing 3`-end: {0}\".format(percentage(three_prime_missing, total)),\n file=sys.stderr,\n )\n print(\"Contain Ns: {0}\".format(percentage(contain_ns, total)), file=sys.stderr)\n\n if cannot_translate:\n print(\n \"Cannot translate: {0}\".format(percentage(cannot_translate, total)),\n file=sys.stderr,\n )\n\n fw.close()\n\n logging.debug(\n \"Total records: {}, Unique records (strip_names={}): {}\".format(\n grand_total, strip_names, len(seen)\n )\n )\n\n return cdsfasta, outfile",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein",
"def translate_DNA(dnaStrand,translation_table='DNA_TABLE.txt'):\r\n\r\n #dictionary to store the corresponding protein for each codon\r\n d={'TTT':'F','CTT':'L','ATT':'I','GTT':'V','TTC':'F','CTC':'L','ATC':'I','GTC':'V','TTA':'L','CTA':'L','ATA':'I','GTA':'V','TTG':'L','CTG':'L','ATG':'M','GTG':'V','TCT':'S','CCT':'P','ACT':'T','GCT':'A','TCC':'S','CCC':'P','ACC':'T','GCC':'A','TCA':'S','CCA':'P','ACA':'T','GCA':'A','TCG':'S','CCG':'P','ACG':'T','GCG':'A','TAT':'Y','CAT':'H','AAT':'N','GAT':'D','TAC':'Y','CAC':'H','AAC':'N','GAC':'D','TAA':'Stop','CAA':'Q','AAA':'K','GAA':'E','TAG':'Stop','CAG':'Q','AAG':'K','GAG':'E','TGT':'C','CGT':'R','AGT':'S','GGT':'G','TGC':'C','CGC':'R','AGC':'S','GGC':'G','TGA':'Stop','CGA':'R','AGA':'R','GGA':'G','TGG':'W','CGG':'R','AGG':'R','GGG':'G'}\r\n protiens=\"\"\r\n for i in range(0,len(dnaStrand),3):\r\n #extracting each codon\r\n s=dnaStrand[i:i+3]\r\n if(d[s]!=\"Stop\"):\r\n protiens+=d[s]\r\n\r\n return protiens",
"def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)",
"def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq",
"def codon_table_to_codon_map(codon_table: dict, deterministic: bool = True) -> Callable[[SeqLike], SeqLike]:\n\n def backtranslator(seq):\n if seq._type != \"AA\":\n raise TypeError(\"Sequence must be an AA SeqLike!\")\n seq_str = seq.to_str()\n\n nt = \"\"\n for aa in seq_str:\n codons, probs = zip(*codon_table[aa].items())\n\n # we normalize the probabilities\n # most tables are near 1.0, but issues with precision exist\n sum_prob = sum(probs)\n probs = [p / sum_prob for p in probs]\n\n if deterministic:\n nt += codons[0]\n else:\n nt += np.random.choice(codons, p=probs)\n\n new_seqlike = SeqLike(\n nt,\n id=seq.id,\n name=seq.name,\n description=seq.description,\n annotations=seq.annotations,\n dbxrefs=seq.dbxrefs,\n seq_type=\"dna\",\n codon_map=seq.codon_map,\n )\n new_seqlike._aa_record = deepcopy(seq._aa_record)\n return new_seqlike\n\n return backtranslator",
"def retrasladar(translated, t=0):\n translated = translated.split(',')\n aux = list()\n for tran in translated:\n # print(f'tran: {tran}', type(tran))\n if tran.isdigit():\n tran_int = int(tran)\n c = int((tran_int - t) / 26)\n if c < 1:\n c = 1\n if tran_int > 26:\n position_base = tran_int - 26 * c - t\n else:\n position_base = tran_int - t\n # print(f'c={(tran_int-t) / 26}. c ~= {c}')\n else:\n position_base = tran\n if position_base == 0:\n position_base = 26\n aux.append(position_base)\n print(f'Posiciones base: {aux}')\n return aux",
"def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res",
"def translate(dna):\n rna = dna.replace('T', 'U')\n startIndex = dna.find('AUG') + 1\n aminoAcidsSeq = \"\"\n for i in range(startIndex, len(rna), 3):\n # codon = rna[i: i+3]\n aminoAcidsSeq += code[rna[i: i+3]]\n if aminoAcidsSeq[len(aminoAcidsSeq) - 1] == '*':\n aminoAcidsSeq = aminoAcidsSeq[:-1]\n break\n return aminoAcidsSeq",
"def extend_taa_seq(self, taa_pos_base, old_seq, new_seq):\n taa_pos = None\n termlen = -1 # use -1 to detect abnormal computes\n seq_end = self.cds_end\n new_aa_seq = ''\n i = 0\n while True:\n ci = i*3\n old_codon_seq = old_seq[ci:ci+3]\n new_codon_seq = new_seq[ci:ci+3]\n # if sequence comes to ends, extend sequence from reference file\n if (old_codon_seq not in standard_codon_table or\n new_codon_seq not in standard_codon_table):\n seq_inc = faidx.refgenome.fetch_sequence(self.chrm, seq_end+1, seq_end+100)\n old_seq += seq_inc\n new_seq += seq_inc\n old_codon_seq = old_seq[ci:ci+3]\n new_codon_seq = new_seq[ci:ci+3]\n seq_end += 100\n\n taa_ref_run = codon2aa(old_codon_seq)\n taa_alt_run = codon2aa(new_codon_seq)\n new_aa_seq += taa_alt_run\n # print i, old_codon_seq, new_codon_seq, taa_ref_run, taa_alt_run\n if taa_pos == None and taa_ref_run != taa_alt_run:\n taa_pos = i\n taa_ref = taa_ref_run\n taa_alt = taa_alt_run\n if taa_alt_run == '*':\n if taa_pos == None:\n # stop codon encountered before difference\n return None # nothing occur to protein level\n termlen = i + 1 - taa_pos\n break\n i += 1\n\n new_aa_seq = new_aa_seq[taa_pos:]\n if taa_pos == None:\n print('oldseq', old_seq)\n print('newseq', new_seq)\n taa_pos += taa_pos_base\n\n aae = AAExtension()\n aae.taa_pos = taa_pos\n aae.taa_ref = taa_ref\n aae.taa_alt = taa_alt\n aae.termlen = termlen\n aae.new_aa_seq = new_aa_seq\n\n return aae",
"def revcomp(self, seq):\n tab = self.maketrans(b'ACNGT', b'TGNCA')\n return seq.translate(tab)[::-1]",
"def formatted_alignments(self,chosen_a_idxs,bitext,alignments,e_words,f_words):\n output =[]\n output_idxs = []\n for key in chosen_a_idxs.keys():\n temp = []\n temp_idx = []\n idx = chosen_a_idxs[key]\n alignment = alignments[idx]\n for t in alignment:\n temp.append((e_words[t[0]],f_words[t[1]]))\n temp_idx.append((bitext[key][\"en\"].index(e_words[t[0]]),bitext[key][\"fr\"].index(f_words[t[1]])))\n output.append(temp)\n output_idxs.append(temp_idx)\n return output,output_idxs",
"def map_mutations(self, genotypes, alleles, ancestral_state=None):\n genotypes = util.safe_np_int_cast(genotypes, np.int8)\n max_alleles = np.max(genotypes)\n if ancestral_state is not None:\n if isinstance(ancestral_state, str):\n # Will raise a ValueError if not in the list\n ancestral_state = alleles.index(ancestral_state)\n if ancestral_state < 0 or ancestral_state >= len(alleles):\n raise ValueError(\"ancestral_state not between 0 and (num_alleles-1)\")\n max_alleles = max(ancestral_state, max_alleles)\n if max_alleles >= 64:\n raise ValueError(\"A maximum of 64 states is supported\")\n ancestral_state, transitions = self._ll_tree.map_mutations(\n genotypes, ancestral_state\n )\n # Translate back into string alleles\n ancestral_state = alleles[ancestral_state]\n mutations = [\n Mutation(\n node=node,\n derived_state=alleles[derived_state],\n parent=parent,\n metadata=self.tree_sequence.table_metadata_schemas.mutation.empty_value,\n )\n for node, parent, derived_state in transitions\n ]\n return ancestral_state, mutations",
"def translate(codon):\n \n table = { \n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', \n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', \n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', \n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', \n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', \n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', \n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', \n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', \n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', \n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', \n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', \n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', \n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', \n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*', \n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W', \n } \n \n assert codon in table.keys(), \"Not a valid codon sequence.\"\n \n return table[codon]",
"def translate(a, table, deletechars=None):\n a_arr = numpy.asarray(a)\n if issubclass(a_arr.dtype.type, str_):\n return _vec_string(\n a_arr, a_arr.dtype, 'translate', (table,))\n else:\n return _vec_string(\n a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))",
"def transliterate_raga(raga, trans):\n raga.regional_name = transliterate(trans, raga.regional_name)\n raga.janaka = transliterate(trans, raga.janaka)\n raga.aro_swaras = transliterate(trans, raga.aro_swaras)\n raga.ava_swaras = transliterate(trans, raga.ava_swaras)\n raga.category = transliterate(trans, raga.category)\n raga.chakra = transliterate(trans, raga.chakra)\n raga.hind = transliterate(trans, raga.hind)\n raga.kritis = transliterate(trans, raga.kritis)\n raga.songs = transliterate(trans, raga.songs)\n raga.alternates = transliterate(trans, raga.alternates)\n raga.varnams = transliterate(trans, raga.varnams)\n raga.same_aro = transliterate(trans, raga.same_aro)\n raga.same_ava = transliterate(trans, raga.same_ava)\n raga.one_swara_diff = transliterate(trans, raga.one_swara_diff)\n return raga",
"def sequence_replace(sequences, char_to_replace, char_replacements):\n return [sequence_replace_single(sequence, char_to_replace, char_replacements) for sequence in sequences]",
"def back_translate(aln_file, seqdict):\n aln = SeqIO.parse(aln_file.name, 'fasta')\n bt_seq = []\n for prot_seq in aln:\n codon = 0\n bt = ''\n nuc = seqdict[prot_seq.id]\n for aa in prot_seq:\n if aa == '-':\n bt += '---'\n else:\n bt += nuc[codon*3:(codon*3)+3]\n codon += 1\n bt_seq.append(bt)\n return bt_seq",
"def convert_ambigs(strings, alph):\n ms = alph.translator(False)\n for i in range(len(strings)):\n strings[i] = strings[i].translate(ms)\n return(strings)",
"def translate(rna):\n RNA_CODON_TABLE = {\"UUU\": \"F\", \"UUC\": \"F\", \"UUA\": \"L\", \"UUG\": \"L\",\n \"UCU\": \"S\", \"UCC\": \"S\", \"UCA\": \"S\", \"UCG\": \"S\",\n \"UAU\": \"Y\", \"UAC\": \"Y\", \"UAA\": \"*\", \"UAG\": \"*\",\n \"UGU\": \"C\", \"UGC\": \"C\", \"UGA\": \"*\", \"UGG\": \"W\",\n \"CUU\": \"L\", \"CUC\": \"L\", \"CUA\": \"L\", \"CUG\": \"L\",\n \"CCU\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\",\n \"CAU\": \"H\", \"CAC\": \"H\", \"CAA\": \"Q\", \"CAG\": \"Q\",\n \"CGU\": \"R\", \"CGC\": \"R\", \"CGA\": \"R\", \"CGG\": \"R\",\n \"AUU\": \"I\", \"AUC\": \"I\", \"AUA\": \"I\", \"AUG\": \"M\",\n \"ACU\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"AAU\": \"N\", \"AAC\": \"N\", \"AAA\": \"K\", \"AAG\": \"K\",\n \"AGU\": \"S\", \"AGC\": \"S\", \"AGA\": \"R\", \"AGG\": \"R\",\n \"GUU\": \"V\", \"GUC\": \"V\", \"GUA\": \"V\", \"GUG\": \"V\",\n \"GCU\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\", \"GCG\": \"A\",\n \"GAU\": \"D\", \"GAC\": \"D\", \"GAA\": \"E\", \"GAG\": \"E\",\n \"GGU\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\"}\n str = ''\n list = [rna[i:i+3] for i in range(0,len(rna),3)]\n for x in list:\n #checks if x is in key of RNA_CODON_TABLE\n if x in RNA_CODON_TABLE:\n #appends only if the value for the given key is not *\n if RNA_CODON_TABLE[x] != '*':\n str = str + RNA_CODON_TABLE[x]\n #if only one char is extra(meaning apart form the 3 pair characters available in dictionary)\n #checks if the char is in following\n elif len(x) == 1 and x in ['A','G','C','U']:\n str = str + x\n #if the char is of length 2 i.e, 2 words extra\n elif len(x) == 2 and x[0] in ['A','G','C','U'] and x[1] in ['A','G','C','U']:\n #Then appending the char to the actually converted string\n str = str + x[0]\n str = str + x[1]\n #if the char is not in the above characters then it is a unrecognised character.\n else:\n print(\"Unrecognised character:\",x)\n return str",
"def translate(self, table, deletechars=None):\n return asarray(translate(self, table, deletechars))",
"def determine_aa_change( self ):\n for k,v in self.obj_mi.hash_isoforms.iteritems(): #k = string that is isoform_id, v = Isoform instance\n obj_tt = self.create_transcript_instances( k )\n\n #METHOD 1: get the original codon & mutated codon\n # orig_codon = obj_tt.retrieve_containing_codon( self.snv_start, self.snv_strand )\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # mut_codon = obj_tt.retrieve_containing_codon( self.snv_start, self.snv_strand )\n\n\n #METHOD 2: get the mutated codon\n full_pos = self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )\n hash_codon_info = obj_tt.get_mutated_codon( self.base_orig, self.base_alt, full_pos, self.snv_strand, True ) #output is hash -> {'codon_orig': codon_orig, 'codon_mut': codon_mut, 'aa_orig': aa_orig, 'aa_mut': aa_mut}\n\n\n\n ##TEST:: show the AA change based on mutation\n # print \"hash_codon_info: \"\n # print hash_codon_info\n\n # print \"gene strand & snv strand: \", obj_tt.iso_sj.strand, \" & \", self.snv_strand\n # print \"original base > mutated base: \", self.base_orig, \" > \", self.base_alt\n # print \"original codon > mutated codon: \", hash_codon_info['codon_orig'], \" > \", hash_codon_info['codon_mut']\n # print \"original AA > mutated AA: \", hash_codon_info['aa_orig'], \" > \", hash_codon_info['aa_mut']\n\n\n ##TEST:: determine consequence\n print \"GV_DAAC 1: \"\n obj_tt.alteration_consequence( self.base_alt, self.get_genomic_range(), self.snv_strand, self.alt_type )\n \n\n ##TEST METHOD - SEE WHAT STEPS I NEED TO PERFORM\n #TEST:: retrieve the original base & the mutated base\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # orig_base = obj_tt.arr_nuc_seq[ i_genome_pos ]\n # print \"k = \", k, \" & i_genome_pos = \", i_genome_pos, \" | orig_base = \", orig_base, \" & double_check = \", self.base_orig, \" & iso_sj.strand = \", obj_tt.iso_sj.strand, \" & mut strand = \", self.snv_strand\n # hash_orig_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_orig = \", hash_orig_codon\n # get_orig_codon = obj_tt.arr_nuc_seq[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] + 1 ]\n # str_orig_codon = ''.join( get_orig_codon ) if obj_tt.iso_sj.strand > 0 else ''.join( get_orig_codon[::-1] )\n # print \"seq_orig = \", str_orig_codon, \" & type = \", type( get_orig_codon ), \" & rf = \", obj_tt.arr_rf[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] + 1 ], \" & list_orig_codon = \", get_orig_codon\n\n # ##TEST:: make mutation\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # hash_mut_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_muts = \", hash_mut_codon\n # get_mut_codon = obj_tt.arr_nuc_seq[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] + 1 ]\n # str_mut_codon = ''.join( get_mut_codon ) if obj_tt.iso_sj.strand > 0 else ''.join( get_mut_codon[::-1] )\n # print \"seq_muts = \", str_mut_codon, \" & type = \", type( get_mut_codon ), \" & rf = \", obj_tt.arr_rf[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] + 1 ], \" & list_mut_codon = \", get_mut_codon \n\n # ##TEST:: retrieve \n # print \"AA: from \", Seq( str_orig_codon ).translate( to_stop = False ), \">\", Seq( str_mut_codon ).translate( to_stop = False )\n\n # try:\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # orig_base = obj_tt.arr_nuc_seq[ i_genome_pos ]\n # print \"k = \", k, \" & i_genome_pos = \", i_genome_pos, \" | 
orig_base = \", orig_base, \" & double_check = \", self.base_orig, \" & iso_sj.strand = \", obj_tt.iso_sj.strand, \" & mut strand = \", self.snv_strand\n # hash_orig_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_orig = \", hash_orig_codon\n # get_orig_codon = obj_tt.arr_nuc_seq[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] ]\n # print \"seq_orig = \", get_orig_codon\n\n # ##TEST:: make mutation\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # hash_mut_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_muts = \", hash_mut_codon\n # get_mut_codon = obj_tt.arr_nuc_seq[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] ]\n # print \"seq_muts = \", get_mut_codon \n\n # ##TEST:: retrieve \n # print \"AA: from \", Seq( orig_codon ).translate( to_stop = False ), \">\", Seq( mut_codon ).translate( to_stop = False )\n # except:\n # print \"ERROR:: for \", k, \", position does not exist: \", self.snv_start\n # continue\n\n print \"////////////////////\\n\"",
"def make_codon_pos_align(aln):\n\n def func(seq):\n dct = {-1: \"-\",\n 0: \"0\",\n 1: \"1\",\n 2: \"2\"}\n return \"\".join(util.mget(dct, mark_codon_pos(seq)))\n return mapalign(aln, valfunc=func)",
"def convert_clifford_sequence_to_tape(\n clifford_sequence, lutmapping, gate_decomposition=gate_decomposition\n):\n # This is intended to replace the block below but not done because\n # I cannot test it at this moment (MAR)\n # decomposed_seq = decompose_clifford_seq(clifford_sequence,\n # gate_decomposition)\n decomposed_seq = []\n for cl in clifford_sequence:\n decomposed_seq.extend(gate_decomposition[cl])\n tape = []\n for g in decomposed_seq:\n tape.append(lutmapping.index(g))\n return tape",
"def ea_equivalent_permutation_mappings(f, spaces=None):\n N = int(log(len(f), 2))\n mask = sum((1 << i) for i in range(0, N))\n if spaces == None:\n spaces = get_lat_zeroes_spaces(f)\n result = []\n for V in spaces:\n if thickness(V, N) == N:\n L_lut = [-1 for x in range(0, 2**N)]\n full_space = linear_span(V)\n for x in full_space:\n L_lut[x & mask] = x >> N\n if -1 in L_lut:\n raise Exception(\"Problem in EA-equivalent mapping\")\n else:\n result.append(\n linear_function_lut_to_matrix(L_lut).transpose()\n )\n return result",
"def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna",
"def translation (RNA):\n\tresult = []\n\tif len(RNA) == 0: return result\n\tdna.isNucleotide(RNA)\n\tif \"T\" in RNA: raise dna.InvalidSequenceError\n\n\torf = dna.get_orf(RNA)\n\n\tfor frame in orf:\n\t\tpeptide = return_peptide(RNA[frame[0]:frame[1]])\n\t\tresult.append(peptide)\n\treturn result"
] | [
"0.5816646",
"0.5639811",
"0.5312178",
"0.52944434",
"0.5252611",
"0.5239229",
"0.52306616",
"0.5195211",
"0.5128543",
"0.5119812",
"0.507834",
"0.5040113",
"0.5009105",
"0.49986827",
"0.49927104",
"0.49352637",
"0.49287802",
"0.49183488",
"0.4885356",
"0.48789275",
"0.48654947",
"0.48267096",
"0.48164156",
"0.48050135",
"0.47819167",
"0.47750467",
"0.4760449",
"0.47412276",
"0.4740684",
"0.4735139"
] | 0.73933816 | 0 |
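The essential step in `aa_alts` above (substitute one base in the coding sequence, re-translate, and read off the affected residue) can be sketched with Biopython alone. The toy sequence and position below are assumptions for illustration, and the plain `Seq` constructor is used instead of the legacy `generic_dna` alphabet that the stored code imports:

from Bio.Seq import Seq

cds = "ATGGCTAAA"        # codons: ATG GCT AAA -> M A K
nt_pos, alt = 4, "A"     # 0-indexed substitution; turns the middle codon GCT into GAT
aa_pos = nt_pos // 3     # index of the affected codon

ref_aa = str(Seq(cds).translate(table=11))[aa_pos]
mut_cds = cds[:nt_pos] + alt + cds[nt_pos + 1:]
alt_aa = str(Seq(mut_cds).translate(table=11))[aa_pos]
print("%s -> %s" % (ref_aa, alt_aa))   # A -> D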
Load all genes in the BED file as SeqRecords, fetching their sequence data from the reference. ref_contigs is a dictionary of ref contig sequences created with BioPython's SeqIO.to_dict(). | def get_bed_annots(bed_path, ref_contigs, quiet=False):
annots = defaultdict(list)
with open(bed_path) as f:
for line in f:
line = line.strip().split("\t")
# Note: BED coordinates are 0-indexed, right-open.
chrom, start, end, name, strand = line[0], int(line[1]), int(line[2]), line[3], line[5]
gene_id = line[12] if len(line) >= 13 else ""
desc = line[13] if len(line) >= 14 else ""
ref_contig = ref_contigs[chrom]
gene_seq = Seq(str(ref_contig.seq)[start:end], generic_dna)
if strand == '-':
gene_seq = gene_seq.reverse_complement()
gene_seq_record = SeqRecord(gene_seq, id=gene_id, name=name, description=desc)
coding_blocks = []
if (len(line) >= 12 and line[9].isdigit() and re.match(COMMA_DELIM_INTEGERS, line[10])
and re.match(COMMA_DELIM_INTEGERS, line[11])):
# We have full blockCount, blockSizes, and blockStarts annotations
block_starts = map(int, re.split(r'\s*,\s*', line[11]))
thick_start = int(line[6]) if line[6].isdigit() else start
thick_end = int(line[7]) if line[7].isdigit() else end
for i, block_size in enumerate(re.split(r'\s*,\s*', line[10])[0:int(line[9])]):
if i >= len(block_starts): break
block_start = block_starts[i] + start
block_end = block_start + int(block_size)
if block_end <= thick_start: next
if block_start > thick_end: next
block_start = max(thick_start, block_start)
block_end = min(thick_end, block_end)
coding_blocks.append((block_start, block_end))
elif len(line) >= 8 and line[6].isdigit() and line[7].isdigit():
# Only thickStart and thickEnd are specified. In this case, there is one coding block.
coding_blocks.append((int(line[6]), int(line[7])))
else:
coding_blocks.append((start, end))
annot = Annot(start, end, strand == '-', gene_seq_record, coding_blocks)
annots[contig_to_vcf_chrom(chrom)].append(annot)
return annots | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict",
"def loadReferenceContigs(referencePath, alignmentSet, windows=None):\n # FIXME we should get rid of this entirely, but I think it requires\n # fixing the inconsistency in how contigs are referenced here versus in\n # pbcore.io\n\n # Read contigs from FASTA file (or XML dataset)\n refReader = ReferenceSet(referencePath)\n contigs = []\n if windows is not None:\n refNames = set([rw.refName for rw in windows])\n for contig in refReader:\n if contig.id in refNames:\n contigs.append(contig)\n else:\n contigs.extend([x for x in refReader])\n contigDict = dict([(x.id, x) for x in contigs])\n\n # initially each contig has an id of None -- this will be overwritten with the id from the cmp.h5, if there are any\n # reads mapped to it.\n for x in contigs:\n x.cmph5ID = None\n\n # Mark each contig with it's ID from the cmp.h5 - match them up using MD5s\n for x in alignmentSet.referenceInfoTable:\n if x.FullName in contigDict:\n contigDict[x.FullName].cmph5ID = x.ID\n\n return contigs",
"def fetchRefSeqDict(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res[i.name] = i\n return res",
"def read_refgene_file(self, file_path):\n with open(file_path) as fi:\n for line in fi:\n content = line.strip().split()\n refgene = RefGene()\n refgene.bin = int(content[0])\n refgene.name, refgene.chrom, refgene.strand = content[1], content[2].lower(), content[3]\n refgene.tx_start, refgene.tx_end = int(content[4]), int(content[5])\n refgene.cds_start, refgene.cds_end = int(content[6]), int(content[7])\n refgene.exon_count = int(content[8])\n refgene.exon_starts = [int(e) for e in content[9][:-1].split(',')]\n refgene.exon_ends = [int(e) for e in content[10][:-1].split(',')]\n refgene.core, refgene.name2 = int(content[11]), content[12]\n refgene.cds_start_stat, refgene.cds_end_stat = content[13], content[14]\n # refgene.exon_frames = [int(e) for e in content[15][:-1].split(',')]\n self.ref_genes.append(refgene)",
"def get_sequin_annots(sequin_path, ref_contigs, quiet=False):\n annots = defaultdict(list)\n \n # We need a dummy class to hold the current state while parsing\n # (otherwise the below private functions can't modify it; there's no \"nonlocal\" in python 2.x)\n class _:\n in_contig = None\n in_feature = None\n gene_name = None\n desc = None\n chrom_start = None\n chrom_end = None\n strand = None\n feature_seq_str = \"\"\n coding_blocks = []\n \n def _save_sequin_feature():\n # The only features we care about are the CDS features. Others get discarded during parsing.\n if _.in_feature == \"CDS\":\n if len(_.feature_seq_str) == 0:\n if not quiet: sys.stderr.write(\"WARN: 0-length CDS in contig %s\" % _.in_contig)\n elif _.gene_name is None or _.strand is None or _.chrom_start is None or _.chrom_end is None:\n if not quiet: sys.stderr.write(\"WARN: invalid CDS feature in contig %s\" % _.in_contig)\n else:\n gene_seq = Seq(_.feature_seq_str, generic_dna)\n if _.strand == '-':\n gene_seq = gene_seq.reverse_complement()\n gene_seq_record = SeqRecord(gene_seq, id=_.gene_name, name=_.gene_name, description=_.desc)\n annot = Annot(_.chrom_start, _.chrom_end, _.strand == '-', gene_seq_record, \n _.coding_blocks)\n annots[contig_to_vcf_chrom(_.in_contig)].append(annot)\n _.in_feature = _.gene_name = _.desc = _.chrom_start = _.chrom_end = _.strand = None\n _.feature_seq_str = \"\"\n _.coding_blocks = []\n \n def _update_sequin_feature(fields):\n if fields[0] != \"\" and fields[1] != \"\":\n # If the first two fields are present, this specifies a sequence range\n if not (fields[0].isdigit() and fields[1].isdigit()):\n # We will only attempt to utilize *complete* CDS features\n # (None of the start or end positions can be qualified by \">\" or \"<\")\n _.in_feature = \"CDS-partial\"\n return\n\n # Append the specified sequence to the `_.feature_seq_str`.\n # Note: Sequin table coordinates, like GenBank, are 1-indexed, right-closed.\n start = int(fields[0])\n end = int(fields[1])\n if _.strand is None: \n _.strand = '+' if start <= end else '-'\n elif _.strand != ('+' if start <= end else '-'):\n sys.stderr.write(\"WARN: strand changed direction, invalid CDS\")\n _.in_feature = \"CDS-partial\"\n return\n if _.strand == '-':\n start, end = end, start\n start -= 1\n ref_contig = ref_contigs[_.in_contig]\n seg = str(ref_contig.seq)[start:end]\n _.coding_blocks.append((start, end))\n _.feature_seq_str = seg + _.feature_seq_str if _.strand == '-' else _.feature_seq_str + seg\n _.chrom_start = min(start, _.chrom_start if _.chrom_start is not None else float('inf'))\n _.chrom_end = max(end, _.chrom_end if _.chrom_end is not None else float('-inf'))\n \n elif len(fields) >= 5:\n # If the first three fields are blank, this specifies a qualifier key + value\n if fields[3] == \"gene\":\n _.gene_name = fields[4]\n elif fields[3] == \"product\":\n _.desc = fields[4]\n \n with open(sequin_path) as f:\n for line in f:\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\", 4)\n if len(line.strip()) == 0:\n # Whitespace-only lines signal the end of feature data for a contig.\n # They may be followed by INFO: lines from the annotator, which we ignore.\n _save_sequin_feature()\n _.in_contig = None\n elif _.in_contig is None and line[0] == '>':\n # Lines that begin with \">Feature \" signal the start of feature data for a contig\n # Fields are separated by spaces; the second field is the full contig ID\n _save_sequin_feature()\n sp_fields = line[1:].split(' ')\n if sp_fields[0] == 'Feature' and len(sp_fields) >= 2:\n 
if ref_contigs.has_key(sp_fields[1]):\n _.in_contig = sp_fields[1]\n elif not quiet:\n sys.stderr.write(\"WARN: unknown contig in Sequin file: %s\" % sp_fields[1])\n elif _.in_contig is not None:\n if len(fields) < 3: \n if not quiet: sys.stderr.write(\"WARN: incomplete Sequin line: %s\" % line)\n next\n in_new_feature = fields[2].strip() != \"\"\n if _.in_feature is None or in_new_feature:\n _save_sequin_feature()\n _.in_feature = fields[2].strip()\n if _.in_feature == \"CDS\":\n _update_sequin_feature(fields)\n elif _.in_feature == \"CDS\":\n _update_sequin_feature(fields)\n \n return annots",
"def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return res",
"def load_refgenomes(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.load_refgenomes',\n [params], self._service_ver, context)",
"def fetchRefSeq(genome = 'hg18',lookupval = 'name'):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']={}\n output[chr]['-']={}\n for row in rows:\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']][row[lookupval]]=row\n return output",
"def create_reference(filename):\n contigs = {}\n with open(filename, \"r\") as f:\n header = \"\"\n seq = \"\"\n for line in f:\n if line[0] == \">\":\n if header:\n contigs[header] = seq\n header = line[1:].rstrip()\n seq = \"\"\n else:\n seq += line.rstrip()\n\n return contigs",
"def loadContentSamplesFile(self, lines):\n refgenome = set()\n \n for line in lines:\n if line.startswith(\"#\"):\n continue\n \n tokens = line.rstrip(\"\\n\").split(\"\\t\")\n \n # create and fill a \"GbsSample\" object\n for samplesCol in [\"genotype\", \"flowcell\", \"lane\"]:\n if \"_\" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"underscore in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n if \" \" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"space in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n if \".\" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"dot in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n geno = tokens[self.samplesCol2idx[\"genotype\"]]\n flowcell = tokens[self.samplesCol2idx[\"flowcell\"]]\n laneNum = int(tokens[self.samplesCol2idx[\"lane\"]])\n barcode = tokens[self.samplesCol2idx[\"barcode\"]]\n if self.fclnToKeep is not None and \\\n \"%s_%i\" % (flowcell, laneNum) != self.fclnToKeep:\n continue\n iSample = GbsSample(geno, flowcell, laneNum, barcode,\n \"before\" if int(self.lSteps[0]) < 3 \\\n else \"after\")\n iSample.refGenome = tokens[self.samplesCol2idx[\"ref_genome\"]]\n iSample.library = tokens[self.samplesCol2idx[\"library\"]]\n iSample.seqCenter = tokens[self.samplesCol2idx[\"seq_center\"]]\n iSample.seqPlatform = tokens[self.samplesCol2idx[\"seq_platform\"]]\n iSample.seqPlatformModel = tokens[self.samplesCol2idx[\"seq_platform_model\"]]\n iSample.date = tokens[self.samplesCol2idx[\"date\"]]\n iSample.initFastqFile1 \\\n = \"%s/%s\" % (self.pathToInReadsDir,\n tokens[self.samplesCol2idx[\"fastq_file_R1\"]])\n if tokens[self.samplesCol2idx[\"fastq_file_R2\"]] != \"\":\n iSample.initFastqFile2 \\\n = \"%s/%s\" % (self.pathToInReadsDir,\n tokens[self.samplesCol2idx[\"fastq_file_R2\"]])\n if iSample.id not in self.dSamples:\n self.dSamples[iSample.id] = iSample\n refgenome.add(iSample.refGenome)\n\n if flowcell not in self.dFlowcells:\n self.dFlowcells[flowcell] = []\n if laneNum not in self.dFlowcells[flowcell]:\n self.dFlowcells[flowcell].append(laneNum)\n\n # create and fill a \"GbsLane\" object\n laneId = \"%s_%i\" % (flowcell, laneNum)\n if laneId not in self.dLanes:\n self.dLanes[laneId] = GbsLane(laneId, flowcell, laneNum)\n self.dLanes[laneId].insert(iSample)\n\n # create and fill a \"GbsGeno\" object\n if geno not in self.dGenos:\n self.dGenos[geno] = GbsGeno(geno)\n self.dGenos[geno].insert(iSample)\n \n if (\"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps \\\n or \"7\" in self.lSteps or \"8\" in self.lSteps) \\\n and len(refgenome) > 1:\n print(refgenome)\n msg = \"samples file contains more than one reference genome\"\n raise ValueError(msg)",
"def import_file(some_genbank, collection):\n with open(some_genbank, 'r') as open_file:\n collection = kv.get_collection(collection)\n\n # Each \"record\" in genbank file is read, corresponds to individual contigs\n for record in SeqIO.parse(open_file, 'gb'):\n current_contig = record.name\n try:\n current_species = record.annotations['source']\n except KeyError:\n name = re.search(r'\\w+\\/(.+)\\.\\w+$', some_genbank)\n current_species = name.group(1)\n \n\n collection.insert_one({\n 'species':current_species,\n 'contig':current_contig,\n 'dna_seq':str(record.seq),\n 'type':'contig'\n })\n\n print \"Importing {}\".format(current_contig)\n ssu_gene = get_16S(record)\n if ssu_gene:\n try:\n locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n \n parsed_location = kv.get_gene_location(ssu_gene[0].location)\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n },\n 'locus_tag':locus_tag,\n 'annotation':ssu_gene[0].qualifiers['product'][0],\n 'dna_seq':ssu_gene[1],\n 'type':'16S'\n }\n print \"adding 16S gene!\"\n collection.insert_one(gene_record)\n kv.get_collection('16S').insert_one(gene_record)\n\n for feature in record.features:\n if feature.type == 'CDS':\n parsed_location = kv.get_gene_location(feature.location)\n try:\n locus_tag = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n 'index':None\n },\n 'locus_tag':locus_tag,\n 'annotation':feature.qualifiers['product'][0],\n 'dna_seq':get_dna_seq(feature, record),\n 'aa_seq':feature.qualifiers['translation'][0],\n 'type':'gene'\n }\n collection.insert_one(gene_record)",
"def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))",
"def gbk_parse(fname):\n fhand = _open_file(gbkfname)\n unk = 1 \n\n for record in SeqIO.parse(fhand, \"genbank\"):\n\n gene_tags = dict()\n tx_tags = collections.defaultdict(list) \n exon = collections.defaultdict(list) \n cds = collections.defaultdict(list) \n mol_type, chr_id = None, None \n\n for rec in record.features:\n\n if rec.type == 'source':\n mol_type = rec.qualifiers['mol_type'][0]\n try:\n chr_id = rec.qualifiers['chromosome'][0]\n except:\n chr_id = record.name \n continue \n\n strand='-'\n strand='+' if rec.strand>0 else strand\n \n fid = None \n try:\n fid = rec.qualifiers['gene'][0]\n except:\n pass\n\n transcript_id = None\n try:\n transcript_id = rec.qualifiers['transcript_id'][0]\n except:\n pass \n\n if re.search(r'gene', rec.type):\n gene_tags[fid] = (rec.location._start.position+1, \n rec.location._end.position, \n strand,\n rec.type,\n rec.qualifiers['note'][0])\n elif rec.type == 'exon':\n exon[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n elif rec.type=='CDS':\n cds[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n else: \n # get all transcripts \n if transcript_id: \n tx_tags[fid].append((rec.location._start.position+1,\n rec.location._end.position, \n transcript_id,\n rec.type))\n # record extracted, generate feature table\n unk = feature_table(chr_id, mol_type, strand, gene_tags, tx_tags, cds, exon, unk)\n \n #break\n fhand.close()",
"def load_reference(self, path_to_reference):\n with open(path_to_reference,'r') as f:\n qids_to_relevant_docids = self.load_reference_from_stream(f)\n return qids_to_relevant_docids",
"def read_in_translate_vcf(vcf_file, ref_file, compressed=True):\n from Bio import SeqIO\n import numpy as np\n\n prots = {}\n\n posLoc = 0\n refLoc = 0\n altLoc = 0\n sampLoc = 9\n\n with open(vcf_file) as f:\n for line in f:\n if line[0] != '#':\n #actual data\n line = line.strip()\n dat = line.split('\\t')\n POS = int(dat[posLoc])\n REF = dat[refLoc]\n ALT = dat[altLoc].split(',')\n GEN = dat[0] #'CHROM' or the gene name here\n calls = np.array(dat[sampLoc:])\n\n #get samples that differ from Ref at this site\n recCalls = {}\n for sname, sa in zip(samps, calls):\n if sa != '.':\n recCalls[sname] = sa\n\n #store position and the altLoc\n for seq, gen in recCalls.iteritems():\n alt = str(ALT[int(gen[0])-1]) #get the index of the alternate\n ref = REF\n pos = POS-1 #VCF numbering starts from 1, but Reference seq numbering\n #will be from 0 because it's python!\n gen = GEN #from CHROM, gene name\n\n if gen not in prots.keys():\n prots[gen] = {}\n prots[gen]['sequences'] = {}\n prots[gen]['positions'] = []\n prots[gen]['reference'] = ''\n if seq not in prots[gen]['sequences'].keys():\n prots[gen]['sequences'][seq] = {}\n\n #will never be insertion or deletion! because translation.\n prots[gen]['sequences'][seq][pos] = alt\n prots[gen]['positions'].append(pos)\n\n elif line[0] == '#' and line[1] == 'C':\n #header line, get all the information\n header = line.strip().split('\\t')\n posLoc = header.index(\"POS\")\n refLoc = header.index(\"REF\")\n altLoc = header.index(\"ALT\")\n sampLoc = header.index(\"FORMAT\")+1\n samps = header[sampLoc:]\n nsamp = len(samps)\n\n for refSeq in SeqIO.parse(translation_ref_file(path), format='fasta'):\n prots[refSeq.name]['reference'] = str(refSeq.seq)\n posN = np.array(prots[refSeq.name]['positions'])\n posN = np.unique(posN)\n prots[refSeq.name]['positions'] = np.sort(posN)\n\n return prots",
"def load_genomes(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.load_genomes',\n [params], self._service_ver, context)",
"def _process_dbxref(self):\n\n raw = '/'.join((self.rawdir, 'dbxref'))\n logger.info(\"processing dbxrefs\")\n line_counter = 0\n\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (dbxref_id, db_id, accession, version, description, url) = line\n # dbxref_id\tdb_id\taccession\tversion\tdescription\turl\n # 1\t2\tSO:0000000\t\"\"\n\n db_ids = { # the databases to fetch\n 50: 'PMID', # pubmed\n 68: 'RO', # obo-rel\n 71: 'FBdv', # FBdv\n 74: 'FBbt', # FBbt\n # 28:, # genbank\n 30: 'OMIM', # MIM\n # 38, # ncbi\n 75: 'ISBN', # ISBN\n 46: 'PMID', # PUBMED\n 51: 'ISBN', # isbn\n 52: 'SO', # so\n # 76, # http\n 77: 'PMID', # PMID\n 80: 'FBcv', # FBcv\n # 95, # MEDLINE\n 98: 'REACT', # Reactome\n 103: 'CHEBI', # Chebi\n 102: 'MESH', # MeSH\n 106: 'OMIM', # OMIM\n 105: 'KEGG-path', # KEGG pathway\n 107: 'DOI', # doi\n 108: 'CL', # CL\n 114: 'CHEBI', # CHEBI\n 115: 'KEGG', # KEGG\n 116: 'PubChem', # PubChem\n # 120, # MA???\n 3: 'GO', # GO\n 4: 'FlyBase', # FlyBase\n # 126, # URL\n 128: 'PATO', # PATO\n # 131, # IMG\n 2: 'SO', # SO\n 136: 'MESH', # MESH\n 139: 'CARO', # CARO\n 140: 'NCBITaxon', # NCBITaxon\n # 151, # MP ???\n 161: 'DOI', # doi\n 36: 'BDGP', # BDGP\n # 55, # DGRC\n # 54, # DRSC\n # 169, # Transgenic RNAi project???\n 231: 'RO', # RO ???\n 180: 'NCBIGene', # entrezgene\n # 192, # Bloomington stock center\n 197: 'UBERON', # Uberon\n 212: 'ENSEMBL', # Ensembl\n # 129, # GenomeRNAi\n 275: 'PMID', # PubMed\n 286: 'PMID', # pmid\n 264: 'HGNC',\n # 265: 'OMIM', # OMIM_Gene\n 266: 'OMIM', # OMIM_Phenotype\n 300: 'DOID', # DOID\n 302: 'MESH', # MSH\n 347: 'PMID', # Pubmed\n }\n\n if accession.strip() != '' and int(db_id) in db_ids:\n # scrub some identifiers here\n m = re.match(\n r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):',\n accession)\n if m:\n accession = re.sub(m.group(1)+r'\\:', '', accession)\n elif re.match(\n r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)',\n accession):\n continue\n elif re.match(r'\\:', accession): # starts with a colon\n accession = re.sub(r'\\:', '', accession)\n elif re.search(r'\\s', accession):\n # skip anything with a space\n # logger.debug(\n # 'dbxref %s accession has a space: %s',\n # dbxref_id, accession)\n continue\n\n if re.match(r'http', accession):\n did = accession.strip()\n else:\n prefix = db_ids.get(int(db_id))\n did = ':'.join((prefix, accession.strip()))\n if re.search(r'\\:', accession) and prefix != 'DOI':\n logger.warning(\n 'id %s may be malformed; skipping', did)\n\n self.dbxrefs[dbxref_id] = {db_id: did}\n\n elif url != '':\n self.dbxrefs[dbxref_id] = {db_id: url.strip()}\n else:\n continue\n\n # the following are some special cases that we scrub\n if int(db_id) == 2 \\\n and accession.strip() == 'transgenic_transposon':\n # transgenic_transposable_element\n self.dbxrefs[dbxref_id] = {db_id: 'SO:0000796'}\n\n line_counter += 1\n\n return",
"def parse_genbank(email = \"[email protected]\", ref_id = \"NC_045512.2\"):\n ## ============ Fetch genbank record ============ ##\n # Set email \n Entrez.email = email\n # Make handel object \n handle = Entrez.efetch(db=\"nuccore\", id=ref_id, rettype=\"gb\", retmode=\"text\")\n # Save the record -- only extract first record (there should be only one)\n record = next(SeqIO.parse(handle, \"gb\"))\n \n ## ============ Parse genbank record ============ ##\n # Dictionary to hold the open reading frames\n ORFS = dict()\n for feature in record.features:\n # Only extract the coding sequences\n if feature.type == \"CDS\": \n # Special considerations for overlapping ORF\n if feature.qualifiers.get(\"gene\")[0] == \"ORF1ab\":\n # Get the open reading frame that contains the ribosomal slippage\n if \"-1 ribosomal frameshift\" in str(feature.qualifiers.get(\"note\")): \n # Extract the non-overlapping and frameshifted indices\n name = \"ORF1ab\"\n ORFS[name] = feature\n # Get the open reading frame that just contains the 'a' portion\n else:\n # Extract the non-overlapping and frameshifted indices\n name = \"ORF1a\"\n ORFS[name] = feature\n # Iterate ove the remaining trivial CDS \n else:\n # Build the lookup dictionary with the normal sequences\n name = feature.qualifiers.get(\"gene\")[0]\n ORFS[name] = feature\n # Return Lookup dictionary\n return ORFS, record.seq",
"def read_GFF(gff_filename):\n gff_info = {} # loci --> LocusInfo\n tmp = {} # loci PB.X --> list of GFF records for PB.X.Y\n\n for r in collapseGFFReader(gff_filename):\n m = rex_pbid.match(r.seqid)\n if m is None:\n raise Exception(f\"Expected PBID format PB.X.Y but saw {r.seqid}\")\n locus = m.group(1) # ex: PB.1\n if locus not in tmp:\n tmp[locus] = [r]\n gff_info[locus] = LocusInfo(\n chrom=r.chr, strand=r.strand, regions=None, isoforms=None\n )\n else:\n if gff_info[locus].chrom != r.chr:\n logger.warning(\n f\"WARNING: Expected {r.seqid} to be on {gff_info[locus].chrom} but saw {r.chr}. Could be minimap2 multi-mapping inconsistency for repetitive genes. Check later.\\n\"\n )\n tmp[locus].append(r)\n\n # now figure out the exonic regions for each gene PB.X\n for locus, records in tmp.items():\n c = ClusterTree(0, 0)\n for r in records:\n for e in r.ref_exons:\n c.insert(\n max(0, e.start - extra_bp_around_junctions),\n e.end + extra_bp_around_junctions,\n 1,\n )\n\n regions = [(a, b) for (a, b, junk) in c.getregions()]\n regions[0] = (max(0, regions[0][0] - __padding_before_after__), regions[0][1])\n regions[-1] = (\n max(0, regions[-1][0]),\n regions[-1][1] + __padding_before_after__,\n )\n gff_info[locus] = LocusInfo(\n chrom=gff_info[locus].chrom,\n strand=gff_info[locus].strand,\n regions=regions,\n isoforms=[r.seqid for r in records],\n )\n\n return gff_info",
"def load_reference_from_stream(self, f):\n qids_to_relevant_docids = {}\n for l in f:\n vals = l.strip().split('\\t')\n if len(vals) != 4:\n vals = l.strip().split(' ')\n if len(vals) != 4:\n pdb.set_trace()\n raise IOError('\\\"%s\\\" is not valid format' % l)\n\n qid = vals[0]\n if qid in qids_to_relevant_docids:\n pass\n else:\n qids_to_relevant_docids[qid] = []\n _rel = int(vals[3])\n if _rel > 0:\n qids_to_relevant_docids[qid].append(vals[2])\n\n return qids_to_relevant_docids",
"def retrieve_genome_data(filepath):\n try:\n seqrecords = list(SeqIO.parse(filepath, \"genbank\"))\n except:\n seqrecords = []\n # filename = filepath.split(\"/\")[-1]\n if len(seqrecords) == 0:\n print(f\"There are no records in {filepath.name}.\")\n seqrecord = None\n elif len(seqrecords) > 1:\n print(f\"There are multiple records in {filepath.name}.\" )\n seqrecord = None\n else:\n seqrecord = seqrecords[0]\n return seqrecord",
"def write_contigs(self, refs):\n\n contigs_group = self.f.create_group('contigs')\n\n for ref_name, ref in refs:\n contig = contigs_group.create_group(ref_name)\n contig.attrs['name'] = ref_name\n contig.attrs['seq'] = ref\n contig.attrs['len'] = len(ref)",
"def get_cds_start_end_locations_genbank_file(filename):\n # Loop over the features\n genes = defaultdict(list)\n cds = 0\n for seq_record in SeqIO.parse(filename, \"genbank\"):\n print(f'Dealing with GenBank record {seq_record.id}')\n for seq_feature in seq_record.features:\n if seq_feature.type == \"CDS\" and 'protein_id' in seq_feature.qualifiers:\n cds += 1\n prot_id = seq_feature.qualifiers['protein_id'][0]\n start, end = int(seq_feature.location.start), int(seq_feature.location.end)\n genes[prot_id] = genes.get(prot_id, []) + [start, end]\n print(f'There are {cds} CDS and {len(genes)} genes annoted for this genbank record')\n return genes",
"def process_cds(cfs, ref):\n # unpack the tuple\n feat, scaffold, phase = cfs\n # First, extract the sequence of the CDS from the scaffold. This should\n # respect the strand, so we won't have to reverse-complement\n featseq = feat.extract(ref[scaffold])\n return featseq",
"def load(self, reffile):\n with open(reffile, 'r') as rfh:\n self._seqdict = json.load(rfh)\n\n # Check format\n keys = ['name','subtype','accessions']\n if self._nloci > 1:\n keys.append('loci')\n for seqkey,seqs in self._seqdict.iteritems():\n for seq,seqentry in seqs.iteritems():\n self._genenum += 1\n\n for k in keys:\n if not k in seqentry:\n raise Exception('Improperly formated SeqDict object')\n\n if self._nloci > 1:\n if len(seqentry['loci']) != self._nloci:\n raise Exception('Improperly formated SeqDict object')\n\n return None",
"def load_data() -> list:\n # trans_dict is used for changing the given names into standardized names.\n trans_dict = {\"chr1\": \"1\", \"chr2\": \"2\", \"chr3\": \"3\", \"chr4\": \"4\", \"chr5\": \"5\", \"chr6\": \"6\", \"chr7\": \"7\",\n \"chr8\": \"8\", \"chr9\": \"9\", \"chr10\": \"10\", \"chr11\": \"11\", \"chr12\": \"12\", \"chr13\": \"13\", \"chr14\": \"14\",\n \"chr15\": \"15\", \"chr16\": \"16\", \"chr17\": \"17\", \"chr18\": \"18\", \"chr19\": \"19\", \"chrx\": \"x\", \"chry\": \"y\"}\n # This try statement catches user error.\n try:\n with open(sys.argv[1]) as bed_file, open(sys.argv[2]) as fasta_file:\n fasta_records = []\n # Opens the bed file and splits into lists\n bed_file = list(csv.reader(bed_file, delimiter='\\t'))\n # Changes the names of the chromosomes in bed file, does some light rearranging and formatting.\n bed_file = [[trans_dict[record[0].lower()], record[1], record[3][0:record[3].index(\n '\\'')]] for record in bed_file]\n # Sorts the desired indices by chromosome, then by index in the chromosome.\n bed_file = sorted(bed_file, key=itemgetter(1))\n bed_file = sorted(bed_file, key=itemgetter(0))\n # This stores the desired indexes for each chromosome.\n indexable_bed_records = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': [],\n '10': [], '11': [], '12': [], '13': [], '14': [], '15': [], '16': [], '17': [],\n '18': [], '19': [], 'x': [], 'y': []}\n # Put each desired index into it's appropriate chromosome list.\n for record in bed_file:\n indexable_bed_records[record[0]].append([record[2], record[1]])\n # Loops over fasta records in the supplied fasta file\n for fasta_record in fasta_iter(fasta_file):\n # grabs the chromosome id\n chrom_id = fasta_record[\"header\"][:fasta_record[\"header\"].index(' ')].lower()\n # Some chromosomes are not desired, skip them.\n if chrom_id not in indexable_bed_records.keys():\n continue\n # Grabs the indexes we want to extract from the chromosome.\n indexes = indexable_bed_records[chrom_id]\n # Grabs each index+/-10 from the sequence\n for index in indexes:\n fasta_records.append([index[0], fasta_record[\"seq\"][int(index[1]) - 10:int(index[1]) + 10]])\n # Returns a list of lists of format [5'/3',splice site sequence]\n return fasta_records\n # Catches user error.\n except (FileNotFoundError, IndexError) as e:\n if type(e) is IndexError:\n sys.stderr.write(\"Usage: {} bed_file fasta_file\\n\\tbed_file: The appropriate bed file. \\n\\t\"\n \"fasta_file: The appropriate fasta file.\\n\".format(os.path.basename(__file__)))\n elif type(e) is FileNotFoundError:\n sys.stderr.write(\"One of the specified files was not found.\\n\")\n sys.exit(1)",
"def get_ref_seq_dict(ref_seq):\n return SeqIO.to_dict(SeqIO.parse(ref_seq, 'fasta')) if ref_seq else None",
"def get_load_references(self):\n for path, properties in self.load.items():\n yield CaseLoadReference(path=path, properties=list(properties))",
"def read_genbank(genome_accession_no, genbank_file=None):\n \n if genbank_file:\n print \"reading genbank file %s\" % genbank_file\n seq_record = SeqIO.read(genbank_file, \"genbank\")\n else:\n print \"downloading and parsing genbank file for %s\" % genome_accession_no\n handle = Entrez.efetch(db=\"nucleotide\", rettype=\"gb\",\n retmode=\"text\", id=genome_accession_no)\n seq_record = SeqIO.read(handle, \"gb\")\n handle.close()\n return seq_record",
"def read_bgen(\n path: PathType,\n metafile_path: Optional[PathType] = None,\n sample_path: Optional[PathType] = None,\n chunks: Union[str, int, Tuple[int, int, int]] = \"auto\",\n lock: bool = False,\n persist: bool = True,\n contig_dtype: DType = \"str\",\n gp_dtype: DType = \"float32\",\n) -> Dataset:\n if isinstance(chunks, tuple) and len(chunks) != 3:\n raise ValueError(f\"`chunks` must be tuple with 3 items, not {chunks}\")\n if not np.issubdtype(gp_dtype, np.floating):\n raise ValueError(\n f\"`gp_dtype` must be a floating point data type, not {gp_dtype}\"\n )\n if not np.issubdtype(contig_dtype, np.integer) and np.dtype(\n contig_dtype\n ).kind not in {\"U\", \"S\"}:\n raise ValueError(\n f\"`contig_dtype` must be of string or int type, not {contig_dtype}\"\n )\n\n path = Path(path)\n sample_path = Path(sample_path) if sample_path else path.with_suffix(\".sample\")\n\n if sample_path.exists():\n sample_id = read_samples(sample_path).sample_id.values.astype(\"U\")\n else:\n sample_id = _default_sample_ids(path)\n\n bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)\n\n df = read_metafile(bgen_reader.metafile_path)\n if persist:\n df = df.persist()\n arrs = dataframe_to_dict(df, METAFILE_DTYPE)\n\n variant_id = arrs[\"id\"]\n variant_contig = arrs[\"chrom\"].astype(contig_dtype)\n variant_contig, variant_contig_names = encode_contigs(variant_contig)\n variant_contig_names = list(variant_contig_names)\n variant_position = arrs[\"pos\"]\n variant_allele = da.hstack((arrs[\"a1\"][:, np.newaxis], arrs[\"a2\"][:, np.newaxis]))\n\n call_genotype_probability = da.from_array(\n bgen_reader,\n chunks=chunks,\n lock=lock,\n fancy=False,\n asarray=False,\n name=f\"{bgen_reader.name}:read_bgen:{path}\",\n )\n call_dosage = _to_dosage(call_genotype_probability)\n\n ds: Dataset = create_genotype_dosage_dataset(\n variant_contig_names=variant_contig_names,\n variant_contig=variant_contig,\n variant_position=variant_position,\n variant_allele=variant_allele,\n sample_id=sample_id,\n call_dosage=call_dosage,\n call_genotype_probability=call_genotype_probability,\n variant_id=variant_id,\n )\n\n return ds"
] | [
"0.67605436",
"0.6540504",
"0.6476846",
"0.6414153",
"0.6203886",
"0.6145601",
"0.61397177",
"0.6064767",
"0.58231187",
"0.5773407",
"0.575686",
"0.57291776",
"0.55980134",
"0.5574859",
"0.5554105",
"0.55270934",
"0.5468517",
"0.54490787",
"0.54445904",
"0.5379438",
"0.53729737",
"0.5358047",
"0.53526497",
"0.53461915",
"0.53412426",
"0.5327989",
"0.531755",
"0.530025",
"0.5279313",
"0.5279085"
] | 0.6616143 | 1 |
Load all genes in the Sequin table as SeqRecords, fetching their sequence data from the reference. ref_contigs is a dictionary of ref contig sequences created with BioPython's SeqIO.to_dict(). | def get_sequin_annots(sequin_path, ref_contigs, quiet=False):
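    # Hedged usage sketch (not part of the original): ref_contigs is expected to be the
    # dict produced by Bio.SeqIO.to_dict, keyed by full contig ID; the file names below
    # are placeholders, not real paths.
    #   from Bio import SeqIO
    #   ref_contigs = SeqIO.to_dict(SeqIO.parse("reference_contigs.fa", "fasta"))
    #   annots = get_sequin_annots("annotation.tbl", ref_contigs, quiet=True)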
annots = defaultdict(list)
# We need a dummy class to hold the current state while parsing
# (otherwise the below private functions can't modify it; there's no "nonlocal" in python 2.x)
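    # (Under Python 3 the same effect could be achieved with plain local variables plus
    # "nonlocal" declarations in the helper functions; the dummy class is a 2.x workaround.)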
class _:
in_contig = None
in_feature = None
gene_name = None
desc = None
chrom_start = None
chrom_end = None
strand = None
feature_seq_str = ""
coding_blocks = []
def _save_sequin_feature():
# The only features we care about are the CDS features. Others get discarded during parsing.
if _.in_feature == "CDS":
if len(_.feature_seq_str) == 0:
if not quiet: sys.stderr.write("WARN: 0-length CDS in contig %s" % _.in_contig)
elif _.gene_name is None or _.strand is None or _.chrom_start is None or _.chrom_end is None:
if not quiet: sys.stderr.write("WARN: invalid CDS feature in contig %s" % _.in_contig)
else:
gene_seq = Seq(_.feature_seq_str, generic_dna)
if _.strand == '-':
gene_seq = gene_seq.reverse_complement()
gene_seq_record = SeqRecord(gene_seq, id=_.gene_name, name=_.gene_name, description=_.desc)
annot = Annot(_.chrom_start, _.chrom_end, _.strand == '-', gene_seq_record,
_.coding_blocks)
annots[contig_to_vcf_chrom(_.in_contig)].append(annot)
_.in_feature = _.gene_name = _.desc = _.chrom_start = _.chrom_end = _.strand = None
_.feature_seq_str = ""
_.coding_blocks = []
def _update_sequin_feature(fields):
if fields[0] != "" and fields[1] != "":
# If the first two fields are present, this specifies a sequence range
if not (fields[0].isdigit() and fields[1].isdigit()):
# We will only attempt to utilize *complete* CDS features
# (None of the start or end positions can be qualified by ">" or "<")
_.in_feature = "CDS-partial"
return
# Append the specified sequence to the `_.feature_seq_str`.
# Note: Sequin table coordinates, like GenBank, are 1-indexed, right-closed.
start = int(fields[0])
end = int(fields[1])
if _.strand is None:
_.strand = '+' if start <= end else '-'
elif _.strand != ('+' if start <= end else '-'):
sys.stderr.write("WARN: strand changed direction, invalid CDS")
_.in_feature = "CDS-partial"
return
if _.strand == '-':
start, end = end, start
start -= 1
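            # (start, end) is now a 0-based, right-open interval regardless of strand,
            # e.g. a Sequin range of 10..12 becomes the Python slice [9:12].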
ref_contig = ref_contigs[_.in_contig]
seg = str(ref_contig.seq)[start:end]
_.coding_blocks.append((start, end))
_.feature_seq_str = seg + _.feature_seq_str if _.strand == '-' else _.feature_seq_str + seg
_.chrom_start = min(start, _.chrom_start if _.chrom_start is not None else float('inf'))
_.chrom_end = max(end, _.chrom_end if _.chrom_end is not None else float('-inf'))
elif len(fields) >= 5:
# If the first three fields are blank, this specifies a qualifier key + value
if fields[3] == "gene":
_.gene_name = fields[4]
elif fields[3] == "product":
_.desc = fields[4]
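    # Illustrative sketch of the tab-delimited Sequin feature table this parser expects
    # (contig ID, coordinates, and qualifier values are made-up examples; <TAB> marks tabs):
    #   >Feature Contig_1
    #   1<TAB>312<TAB>CDS
    #   <TAB><TAB><TAB>gene<TAB>abcD
    #   <TAB><TAB><TAB>product<TAB>hypothetical protein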
with open(sequin_path) as f:
for line in f:
line = line.rstrip("\n")
fields = line.split("\t", 4)
if len(line.strip()) == 0:
# Whitespace-only lines signal the end of feature data for a contig.
# They may be followed by INFO: lines from the annotator, which we ignore.
_save_sequin_feature()
_.in_contig = None
elif _.in_contig is None and line[0] == '>':
# Lines that begin with ">Feature " signal the start of feature data for a contig
# Fields are separated by spaces; the second field is the full contig ID
_save_sequin_feature()
sp_fields = line[1:].split(' ')
if sp_fields[0] == 'Feature' and len(sp_fields) >= 2:
                    if sp_fields[1] in ref_contigs:
_.in_contig = sp_fields[1]
elif not quiet:
sys.stderr.write("WARN: unknown contig in Sequin file: %s" % sp_fields[1])
elif _.in_contig is not None:
if len(fields) < 3:
if not quiet: sys.stderr.write("WARN: incomplete Sequin line: %s" % line)
                    continue
in_new_feature = fields[2].strip() != ""
if _.in_feature is None or in_new_feature:
_save_sequin_feature()
_.in_feature = fields[2].strip()
if _.in_feature == "CDS":
_update_sequin_feature(fields)
elif _.in_feature == "CDS":
_update_sequin_feature(fields)
return annots | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadReferenceContigs(referencePath, alignmentSet, windows=None):\n # FIXME we should get rid of this entirely, but I think it requires\n # fixing the inconsistency in how contigs are referenced here versus in\n # pbcore.io\n\n # Read contigs from FASTA file (or XML dataset)\n refReader = ReferenceSet(referencePath)\n contigs = []\n if windows is not None:\n refNames = set([rw.refName for rw in windows])\n for contig in refReader:\n if contig.id in refNames:\n contigs.append(contig)\n else:\n contigs.extend([x for x in refReader])\n contigDict = dict([(x.id, x) for x in contigs])\n\n # initially each contig has an id of None -- this will be overwritten with the id from the cmp.h5, if there are any\n # reads mapped to it.\n for x in contigs:\n x.cmph5ID = None\n\n # Mark each contig with it's ID from the cmp.h5 - match them up using MD5s\n for x in alignmentSet.referenceInfoTable:\n if x.FullName in contigDict:\n contigDict[x.FullName].cmph5ID = x.ID\n\n return contigs",
"def fetchRefSeq(genome = 'hg18',lookupval = 'name'):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']={}\n output[chr]['-']={}\n for row in rows:\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']][row[lookupval]]=row\n return output",
"def fetchRefSeqDict(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res[i.name] = i\n return res",
"def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict",
"def load_refgenomes(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.load_refgenomes',\n [params], self._service_ver, context)",
"def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return res",
"def import_file(some_genbank, collection):\n with open(some_genbank, 'r') as open_file:\n collection = kv.get_collection(collection)\n\n # Each \"record\" in genbank file is read, corresponds to individual contigs\n for record in SeqIO.parse(open_file, 'gb'):\n current_contig = record.name\n try:\n current_species = record.annotations['source']\n except KeyError:\n name = re.search(r'\\w+\\/(.+)\\.\\w+$', some_genbank)\n current_species = name.group(1)\n \n\n collection.insert_one({\n 'species':current_species,\n 'contig':current_contig,\n 'dna_seq':str(record.seq),\n 'type':'contig'\n })\n\n print \"Importing {}\".format(current_contig)\n ssu_gene = get_16S(record)\n if ssu_gene:\n try:\n locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n \n parsed_location = kv.get_gene_location(ssu_gene[0].location)\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n },\n 'locus_tag':locus_tag,\n 'annotation':ssu_gene[0].qualifiers['product'][0],\n 'dna_seq':ssu_gene[1],\n 'type':'16S'\n }\n print \"adding 16S gene!\"\n collection.insert_one(gene_record)\n kv.get_collection('16S').insert_one(gene_record)\n\n for feature in record.features:\n if feature.type == 'CDS':\n parsed_location = kv.get_gene_location(feature.location)\n try:\n locus_tag = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n 'index':None\n },\n 'locus_tag':locus_tag,\n 'annotation':feature.qualifiers['product'][0],\n 'dna_seq':get_dna_seq(feature, record),\n 'aa_seq':feature.qualifiers['translation'][0],\n 'type':'gene'\n }\n collection.insert_one(gene_record)",
"def read_refgene_file(self, file_path):\n with open(file_path) as fi:\n for line in fi:\n content = line.strip().split()\n refgene = RefGene()\n refgene.bin = int(content[0])\n refgene.name, refgene.chrom, refgene.strand = content[1], content[2].lower(), content[3]\n refgene.tx_start, refgene.tx_end = int(content[4]), int(content[5])\n refgene.cds_start, refgene.cds_end = int(content[6]), int(content[7])\n refgene.exon_count = int(content[8])\n refgene.exon_starts = [int(e) for e in content[9][:-1].split(',')]\n refgene.exon_ends = [int(e) for e in content[10][:-1].split(',')]\n refgene.core, refgene.name2 = int(content[11]), content[12]\n refgene.cds_start_stat, refgene.cds_end_stat = content[13], content[14]\n # refgene.exon_frames = [int(e) for e in content[15][:-1].split(',')]\n self.ref_genes.append(refgene)",
"def get_bed_annots(bed_path, ref_contigs, quiet=False):\n annots = defaultdict(list)\n with open(bed_path) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n # Note: BED coordinates are 0-indexed, right-open.\n chrom, start, end, name, strand = line[0], int(line[1]), int(line[2]), line[3], line[5]\n gene_id = line[12] if len(line) >= 13 else \"\"\n desc = line[13] if len(line) >= 14 else \"\"\n ref_contig = ref_contigs[chrom]\n gene_seq = Seq(str(ref_contig.seq)[start:end], generic_dna)\n if strand == '-':\n gene_seq = gene_seq.reverse_complement()\n gene_seq_record = SeqRecord(gene_seq, id=gene_id, name=name, description=desc)\n \n coding_blocks = []\n if (len(line) >= 12 and line[9].isdigit() and re.match(COMMA_DELIM_INTEGERS, line[10])\n and re.match(COMMA_DELIM_INTEGERS, line[11])):\n # We have full blockCount, blockSizes, and blockStarts annotations\n block_starts = map(int, re.split(r'\\s*,\\s*', line[11]))\n thick_start = int(line[6]) if line[6].isdigit() else start\n thick_end = int(line[7]) if line[7].isdigit() else end\n for i, block_size in enumerate(re.split(r'\\s*,\\s*', line[10])[0:int(line[9])]):\n if i >= len(block_starts): break\n block_start = block_starts[i] + start\n block_end = block_start + int(block_size)\n if block_end <= thick_start: next\n if block_start > thick_end: next\n block_start = max(thick_start, block_start)\n block_end = min(thick_end, block_end)\n coding_blocks.append((block_start, block_end))\n elif len(line) >= 8 and line[6].isdigit() and line[7].isdigit():\n # Only thickStart and thickEnd are specified. In this case, there is one coding block.\n coding_blocks.append((int(line[6]), int(line[7])))\n else:\n coding_blocks.append((start, end))\n \n annot = Annot(start, end, strand == '-', gene_seq_record, coding_blocks)\n annots[contig_to_vcf_chrom(chrom)].append(annot)\n return annots",
"def hostRefSeq(chr,start,end,strand):\n cursor=gbdbConnect()\n selSQL=\"SELECT * from refGene WHERE chrom='%s' AND txStart<='%d' AND txEnd>='%d'\" % (chr,int(start),int(end))\n cursor.execute(selSQL)\n rows=cursor.fetchall()\n results=[]\n if cursor.rowcount==0:\n return False\n else:\n for row in rows:\n results.append(row)\n return results",
"def read_in_translate_vcf(vcf_file, ref_file, compressed=True):\n from Bio import SeqIO\n import numpy as np\n\n prots = {}\n\n posLoc = 0\n refLoc = 0\n altLoc = 0\n sampLoc = 9\n\n with open(vcf_file) as f:\n for line in f:\n if line[0] != '#':\n #actual data\n line = line.strip()\n dat = line.split('\\t')\n POS = int(dat[posLoc])\n REF = dat[refLoc]\n ALT = dat[altLoc].split(',')\n GEN = dat[0] #'CHROM' or the gene name here\n calls = np.array(dat[sampLoc:])\n\n #get samples that differ from Ref at this site\n recCalls = {}\n for sname, sa in zip(samps, calls):\n if sa != '.':\n recCalls[sname] = sa\n\n #store position and the altLoc\n for seq, gen in recCalls.iteritems():\n alt = str(ALT[int(gen[0])-1]) #get the index of the alternate\n ref = REF\n pos = POS-1 #VCF numbering starts from 1, but Reference seq numbering\n #will be from 0 because it's python!\n gen = GEN #from CHROM, gene name\n\n if gen not in prots.keys():\n prots[gen] = {}\n prots[gen]['sequences'] = {}\n prots[gen]['positions'] = []\n prots[gen]['reference'] = ''\n if seq not in prots[gen]['sequences'].keys():\n prots[gen]['sequences'][seq] = {}\n\n #will never be insertion or deletion! because translation.\n prots[gen]['sequences'][seq][pos] = alt\n prots[gen]['positions'].append(pos)\n\n elif line[0] == '#' and line[1] == 'C':\n #header line, get all the information\n header = line.strip().split('\\t')\n posLoc = header.index(\"POS\")\n refLoc = header.index(\"REF\")\n altLoc = header.index(\"ALT\")\n sampLoc = header.index(\"FORMAT\")+1\n samps = header[sampLoc:]\n nsamp = len(samps)\n\n for refSeq in SeqIO.parse(translation_ref_file(path), format='fasta'):\n prots[refSeq.name]['reference'] = str(refSeq.seq)\n posN = np.array(prots[refSeq.name]['positions'])\n posN = np.unique(posN)\n prots[refSeq.name]['positions'] = np.sort(posN)\n\n return prots",
"def loadContentSamplesFile(self, lines):\n refgenome = set()\n \n for line in lines:\n if line.startswith(\"#\"):\n continue\n \n tokens = line.rstrip(\"\\n\").split(\"\\t\")\n \n # create and fill a \"GbsSample\" object\n for samplesCol in [\"genotype\", \"flowcell\", \"lane\"]:\n if \"_\" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"underscore in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n if \" \" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"space in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n if \".\" in tokens[self.samplesCol2idx[samplesCol]]:\n msg = \"dot in %s '%s', replace by dash '-'\" \\\n % (samplesCol, tokens[self.samplesCol2idx[samplesCol]])\n raise ValueError(msg)\n geno = tokens[self.samplesCol2idx[\"genotype\"]]\n flowcell = tokens[self.samplesCol2idx[\"flowcell\"]]\n laneNum = int(tokens[self.samplesCol2idx[\"lane\"]])\n barcode = tokens[self.samplesCol2idx[\"barcode\"]]\n if self.fclnToKeep is not None and \\\n \"%s_%i\" % (flowcell, laneNum) != self.fclnToKeep:\n continue\n iSample = GbsSample(geno, flowcell, laneNum, barcode,\n \"before\" if int(self.lSteps[0]) < 3 \\\n else \"after\")\n iSample.refGenome = tokens[self.samplesCol2idx[\"ref_genome\"]]\n iSample.library = tokens[self.samplesCol2idx[\"library\"]]\n iSample.seqCenter = tokens[self.samplesCol2idx[\"seq_center\"]]\n iSample.seqPlatform = tokens[self.samplesCol2idx[\"seq_platform\"]]\n iSample.seqPlatformModel = tokens[self.samplesCol2idx[\"seq_platform_model\"]]\n iSample.date = tokens[self.samplesCol2idx[\"date\"]]\n iSample.initFastqFile1 \\\n = \"%s/%s\" % (self.pathToInReadsDir,\n tokens[self.samplesCol2idx[\"fastq_file_R1\"]])\n if tokens[self.samplesCol2idx[\"fastq_file_R2\"]] != \"\":\n iSample.initFastqFile2 \\\n = \"%s/%s\" % (self.pathToInReadsDir,\n tokens[self.samplesCol2idx[\"fastq_file_R2\"]])\n if iSample.id not in self.dSamples:\n self.dSamples[iSample.id] = iSample\n refgenome.add(iSample.refGenome)\n\n if flowcell not in self.dFlowcells:\n self.dFlowcells[flowcell] = []\n if laneNum not in self.dFlowcells[flowcell]:\n self.dFlowcells[flowcell].append(laneNum)\n\n # create and fill a \"GbsLane\" object\n laneId = \"%s_%i\" % (flowcell, laneNum)\n if laneId not in self.dLanes:\n self.dLanes[laneId] = GbsLane(laneId, flowcell, laneNum)\n self.dLanes[laneId].insert(iSample)\n\n # create and fill a \"GbsGeno\" object\n if geno not in self.dGenos:\n self.dGenos[geno] = GbsGeno(geno)\n self.dGenos[geno].insert(iSample)\n \n if (\"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps \\\n or \"7\" in self.lSteps or \"8\" in self.lSteps) \\\n and len(refgenome) > 1:\n print(refgenome)\n msg = \"samples file contains more than one reference genome\"\n raise ValueError(msg)",
"def refseq_based_clustering(self):\n self.refseq_based = NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)",
"def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))",
"def fetchRefSeqIntervalsIndexed(genome='hg18',proteinCodingOnly=False,verbose=False):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n if verbose:\n sys.stderr.write(\"Fetching RefSeq Sequences...\\n\")\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']=[]\n output[chr]['-']=[]\n if verbose:\n sys.stderr.write(\"Creating index by chr and strand...\\n\")\n \n for row in rows:\n if proteinCodingOnly and not row['name'].startswith('NM'):\n continue\n try:\n exonStarts = map(int,row['exonStarts'].rstrip().split(\",\")[:-1])\n exonEnds = map(int,row['exonEnds'].rstrip().split(\",\")[:-1])\n except:\n print \"\\t\".join([\"%s:%s\" % (k,v) for k,v in row.iteritems()])\n start = int(row['txStart'])\n exonOffsets = [x-start for x in exonStarts]\n exonLengths = []\n for i in xrange(len(exonStarts)):\n exonLengths.append(exonEnds[i]-exonStarts[i]+1)\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']].append(intervallib.SplicedInterval(row['chrom'],row['txStart'],row['txEnd'],row['strand'],\",\".join([str(x) for x in exonLengths]),\",\".join([str(x) for x in exonOffsets]),name=row['name2']))\n \n #Sort \n if verbose:\n sys.stderr.write(\"Sorting:\\n\")\n tstart = time.time()\n for key in output.keys():\n if verbose:\n sys.stderr.write(\"\\t%s\\t\" % key)\n output[key]['+'].sort()\n output[key]['-'].sort()\n tend = time.time()\n if verbose:\n sys.stderr.write('%0.2f sec\\n' % (tend-tstart))\n tstart = time.time()\n return output",
"def refseq_based_clustering(self):\n self.refseq_based = Usefuls.NonRedSet.NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)",
"def write_contigs(self, refs):\n\n contigs_group = self.f.create_group('contigs')\n\n for ref_name, ref in refs:\n contig = contigs_group.create_group(ref_name)\n contig.attrs['name'] = ref_name\n contig.attrs['seq'] = ref\n contig.attrs['len'] = len(ref)",
"def get_cds(geneid, seqdict):\n nuc_seq = seqdict[geneid]\n # Translate it\n aa_seq = nuc_seq.seq.translate()\n # Decorate it like you would a full SeqRecord object\n aa_seq_rec = SeqRecord.SeqRecord(\n aa_seq,\n id=geneid,\n description='')\n return aa_seq_rec",
"def get_all_refseq(self):\n return self.refseq_based.keys()",
"def get_all_refseq(self):\n return self.refseq_based.keys()",
"def load_reference(self, path_to_reference):\n with open(path_to_reference,'r') as f:\n qids_to_relevant_docids = self.load_reference_from_stream(f)\n return qids_to_relevant_docids",
"def create_reference(filename):\n contigs = {}\n with open(filename, \"r\") as f:\n header = \"\"\n seq = \"\"\n for line in f:\n if line[0] == \">\":\n if header:\n contigs[header] = seq\n header = line[1:].rstrip()\n seq = \"\"\n else:\n seq += line.rstrip()\n\n return contigs",
"def check_ref_strand(vcf_file, fasta_file, out_file, verbose = False):\n rdr = fasta_fai.Reader(fasta_file)\n vcf_in = pysam.VariantFile(vcf_file, mode = 'r')\n \n if out_file[-3:] == \".gz\":\n out_fs = gzip.open(out_file, mode = 'wt')\n else:\n out_fs = open(out_file, mode = 'w')\n \n outl = [\"CHR\", \"POS\", \"ID\", \"REF\", \"ALT\", \"STRAND\", \"REFSEQ\", \"TYPE\"]\n out = '\\t'.join(outl) + '\\n'\n out_fs.write(out)\n counter = 0\n for rec in vcf_in.fetch():\n counter = counter + 1\n if verbose and counter % 1000 == 0:\n print(counter, \"records\")\n vid = rec.id\n chr_name = rec.chrom\n ref = rec.alleles[0]\n if (len(rec.alleles) == 1):\n alt = '.'\n else:\n alt = rec.alleles[1]\n pos = rec.pos - 1\n refseq_base = rdr.get_seq(chr_name, pos, len(ref))\n strnd = 0\n refseq = 0\n vtype = 0\n if ref == refseq_base:\n strnd = 1\n refseq = 1\n elif alt == refseq_base:\n strnd = 1\n refseq = 2\n else:\n refc = compl(ref, reverse = True)\n altc = compl(alt, reverse = True)\n if refc == refseq_base:\n strnd = 2\n refseq = 1\n elif altc == refseq_base:\n strnd = 2\n refseq = 2\n if is_ambg(ref, alt):\n strnd = 0\n if len(ref) == 1 and len(alt) == 1:\n vtype = 1\n\n if len(ref) > 1 and len(alt) == 1:\n vtype = 2\n if len(ref) == 1 and len(alt) > 1:\n vtype = 3\n \n outl = [chr_name, str(pos+1), vid, ref, alt, STRAND[strnd], REFSEQ[refseq], VTYPE[vtype]]\n out = '\\t'.join(outl) + '\\n'\n out_fs.write(out)\n rdr.close()\n vcf_in.close()\n out_fs.close()\n if verbose:\n print(\"Finished\", counter, \"records\")",
"def get_load_references(self):\n for path, properties in self.load.items():\n yield CaseLoadReference(path=path, properties=list(properties))",
"def load_genomes(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.load_genomes',\n [params], self._service_ver, context)",
"def fetch_fasta_from_genome(self, genome_refl):\n\n if not self.check_ref_type(genome_ref, ['KBaseGenomes.Genome']):\n raise ValueError(\"The given genome_ref {} is not a KBaseGenomes.Genome type!\")\n # test if genome references an assembly type\n # do get_objects2 without data. get list of refs\n ws = Workspace(self.ws_url)\n genome_obj_info = ws.get_objects2({\n 'objects': [{'ref': genome_ref}],\n 'no_data': 1\n })\n # get the list of genome refs from the returned info.\n # if there are no refs (or something funky with the return), this will be an empty list.\n # this WILL fail if data is an empty list. But it shouldn't be, and we know because\n # we have a real genome reference, or get_objects2 would fail.\n genome_obj_refs = genome_obj_info.get('data', [{}])[0].get('refs', [])\n\n # see which of those are of an appropriate type (ContigSet or Assembly), if any.\n assembly_ref = list()\n ref_params = [{'ref': genome_ref + \";\" + x} for x in genome_obj_refs]\n ref_info = ws.get_object_info3({'objects': ref_params})\n for idx, info in enumerate(ref_info.get('infos')):\n if \"KBaseGenomeAnnotations.Assembly\" in info[2] or \"KBaseGenomes.ContigSet\" in info[2]:\n assembly_ref.append(\";\".join(ref_info.get('paths')[idx]))\n\n if len(assembly_ref) == 1:\n return fetch_fasta_from_assembly(assembly_ref[0], self.ws_url, self.callback_url)\n else:\n raise ValueError(\"Multiple assemblies found associated with the given genome ref {}! \"\n \"Unable to continue.\")",
"def ProcessEachLocus(strtablefile, genome, refkeys):\n f = open(strtablefile,\"r\")\n line = f.readline()\n while line != \"\":\n items = line.strip().split(\"\\t\")\n chrom = items[0]\n try:\n refchrom = refkeys[chrom]\n except:\n sys.stderr.write(\"ERROR: Chromosome %s not in reference fasta\\n\"%chrom)\n sys.exit(1)\n start = int(items[1])\n end = int(items[2])\n period = int(items[3])\n length = float(items[4])\n maxscore = int(period*length)*2\n motif = items[-1].strip()\n if maxscore > 0:\n score = float(items[8])/maxscore\n try:\n flanks = ExtractFlanks(genome,refchrom,start,end)\n gc = GetGC(flanks)\n entropy = GetEntropy(flanks)\n print \"\\t\".join(map(str,[chrom,start,end,score,gc,entropy]))\n except: pass\n line = f.readline()\n f.close()",
"def fetchReferences(self, dataRef, exposure):\n skyMap = dataRef.get(self.dataPrefix + \"skyMap\", immediate=True)\n tractInfo = skyMap[dataRef.dataId[\"tract\"]]\n patch = tuple(int(v) for v in dataRef.dataId[\"patch\"].split(\",\"))\n patchInfo = tractInfo.getPatchInfo(patch)\n references = lsst.afw.table.SourceCatalog(self.references.schema)\n references.extend(self.references.fetchInPatches(dataRef, patchList=[patchInfo]))\n return references",
"def get_refseq_allele(vcf_file, fasta_file, out_file, verbose = False):\n\n rdr = fasta_fai.Reader(fasta_file)\n vcf_in = pysam.VariantFile(vcf_file, mode = 'r')\n \n vcf_out = pysam.VariantFile(out_file, mode = 'w')\n for r in vcf_in.header.records:\n vcf_out.header.add_record(r)\n \n counter = 0\n for rec in vcf_in.fetch():\n counter = counter + 1\n if verbose and counter % 1000 == 0:\n print(counter, \"records\")\n rec_out = vcf_out.new_record()\n rec_out.id = rec.id\n rec_out.pos = rec.pos\n rec_out.chrom = rec.chrom\n o = list(range(len(rec.alleles)))\n orv = list(reversed(o))\n ref_i = 0\n for i in orv:\n a = rec.alleles[i]\n refseq_base = rdr.get_seq(rec.chrom, rec.pos-1, len(a))\n if a == refseq_base:\n ref_i = i\n o[ref_i] = 0\n o[0] = ref_i\n alleles = list()\n for i in o:\n alleles.append( rec.alleles[i] )\n if (len(alleles) == 1):\n alleles.append('.')\n rec_out.alleles = tuple(alleles)\n alleles_set = set(rec.alleles)\n vcf_out.write(rec_out)\n rdr.close()\n vcf_in.close()\n vcf_out.close()\n if out_file[-3:] == \".gz\":\n pysam.tabix_index(out_file, preset = \"vcf\", force = True)",
"def load_reference_from_stream(self, f):\n qids_to_relevant_docids = {}\n for l in f:\n vals = l.strip().split('\\t')\n if len(vals) != 4:\n vals = l.strip().split(' ')\n if len(vals) != 4:\n pdb.set_trace()\n raise IOError('\\\"%s\\\" is not valid format' % l)\n\n qid = vals[0]\n if qid in qids_to_relevant_docids:\n pass\n else:\n qids_to_relevant_docids[qid] = []\n _rel = int(vals[3])\n if _rel > 0:\n qids_to_relevant_docids[qid].append(vals[2])\n\n return qids_to_relevant_docids"
] | [
"0.63969433",
"0.6097736",
"0.5661921",
"0.56599265",
"0.5587677",
"0.5557396",
"0.5501603",
"0.5493998",
"0.54772866",
"0.5400377",
"0.53919554",
"0.53289264",
"0.53228486",
"0.52766865",
"0.52260077",
"0.5225814",
"0.5206542",
"0.5159658",
"0.5155864",
"0.5155864",
"0.51552695",
"0.51303065",
"0.51283324",
"0.5104764",
"0.5085375",
"0.5073704",
"0.5066056",
"0.5049709",
"0.50272363",
"0.5009819"
] | 0.64372444 | 0 |
Create event files containing millisecond data of a running train at one-minute intervals | def runEventCreation():
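    # The CONFIG global is assumed to look roughly like this (only the keys read below):
    #   CONFIG = {
    #       "steps": {"EventCreation": {
    #           "inputs":  {"min_window_size": ..., "change_speed_by": ...,
    #                       "train_zero_speed_ratio": ..., "datetime_limit": ...,
    #                       "csv_name_prefix": ..., "bucket": ...,
    #                       "window_event_bucket": ..., "window_events_file": ...},
    #           "outputs": {"bucket": ..., "event_dir": ..., "filename_include": ...}}},
    #       "artifacts": {"minio": {"endpoint_url": ..., "access_key": ..., "secret_key": ...,
    #                               "secure": ..., "region_name": ...}}}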
config = CONFIG['steps']['EventCreation']
ci = config['inputs']
co = config['outputs']
min_window_size = ci['min_window_size']
change_speed_by = ci['change_speed_by']
speed_ratio = ci['train_zero_speed_ratio']
datetime_limit = ci['datetime_limit']
csv_name_prefix = ci['csv_name_prefix']
input_bucket = ci['bucket']
window_event_bucket = ci['window_event_bucket']
window_events_file = ci['window_events_file']
output_bucket = co['bucket']
event_dir = co['event_dir']
filename_include = co['filename_include']
minio_config = CONFIG['artifacts']['minio']
minioClient = create_minio_client(minio_config["endpoint_url"],
access_key=minio_config["access_key"],
secret_key=minio_config["secret_key"],
secure=minio_config['secure'])
boto_client = boto3.client("s3",
endpoint_url=minio_config["endpoint_url"],
aws_access_key_id=minio_config["access_key"],
aws_secret_access_key=minio_config["secret_key"],
region_name=minio_config["region_name"])
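    # Two clients point at the same MinIO endpoint: boto_client is only used by
    # get_files() to list the input CSVs, while minioClient is handed on to
    # create_window_event() below.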
csv_files = get_files(input_bucket, boto_client,
file_type='csv', prefix='filtered')
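    # NOTE: the next line overrides the listing above with a single hard-coded file,
    # which looks like a leftover debugging shortcut; drop it to process every CSV found.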
csv_files = ['filtered/7016_2020-09-09.csv']
create_window_event(files=csv_files,
input_bucket=input_bucket,
output_bucket=output_bucket,
minio_client=minioClient,
min_window_size=min_window_size,
ouput_dir=event_dir,
window_event_bucket=window_event_bucket,
window_events_file=window_events_file,
csv_name_prefix=csv_name_prefix,
change_speed_by=change_speed_by,
train_zero_speed_ratio=speed_ratio,
datetime_limit=datetime_limit,
filename_include=filename_include) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")",
"def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")",
"def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()",
"def INPUT_Periods_file(input):\n \n global events\n \n tb = 3600\n ta = 3600\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n len_events = len(events)\n \n input_period = open(os.path.join(os.getcwd(), 'INPUT-Periods'), 'a+')\n\n for i in range(0, len_events):\n \n str_event = str(events[i]['datetime']-tb) + '_' + \\\n str(events[i]['datetime']+ta) + '_' + \\\n str(events[i]['magnitude'] - 0.01) + '_' + \\\n str(events[i]['magnitude'] + 0.01) + '\\n'\n input_period.writelines(str_event)\n \n input_period.close()\n \n print '************************************************************' \n print 'New INPUT-Periods file is generated in your folder.'\n print 'Now, you could run the program again based on your desired event :)' \n print '************************************************************'\n \n sys.exit()",
"def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)",
"def _pre_create_runs_and_time_series(self):\n self._logdir_loader_pre_create.synchronize_runs()\n run_to_events = self._logdir_loader_pre_create.get_run_events()\n if self._run_name_prefix:\n run_to_events = {\n self._run_name_prefix + k: v for k, v in run_to_events.items()\n }\n\n run_names = []\n run_tag_name_to_time_series_proto = {}\n for (run_name, events) in run_to_events.items():\n run_names.append(run_name)\n for event in events:\n _filter_graph_defs(event)\n for value in event.summary.value:\n metadata, is_valid = self._request_sender.get_metadata_and_validate(\n run_name, value\n )\n if not is_valid:\n continue\n if metadata.data_class == summary_pb2.DATA_CLASS_SCALAR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_TENSOR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.TENSOR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_BLOB_SEQUENCE:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE\n )\n\n run_tag_name_to_time_series_proto[\n (run_name, value.tag)\n ] = tensorboard_time_series.TensorboardTimeSeries(\n display_name=value.tag,\n value_type=value_type,\n plugin_name=metadata.plugin_data.plugin_name,\n plugin_data=metadata.plugin_data.content,\n )\n\n self._one_platform_resource_manager.batch_create_runs(run_names)\n self._one_platform_resource_manager.batch_create_time_series(\n run_tag_name_to_time_series_proto\n )",
"def on_epoch_start(self):",
"def example_data_file():\n\n header1 = \"#Sample Interval: 0.100000 (seconds)\"\n header2 = \"Timestamp,AccelX,AccelY,RateX,RateY\"\n header3 = \"dd-mmm-yyyy HH:MM:SS.FFF,mm/s2,mm/s2,rad/s,rad/s\"\n\n start_date = dt.datetime(2016, 3, 17, 1, 0, 0)\n\n # Add one tenth of a second\n time_delta = dt.timedelta(0, 0, 0, 100)\n\n # Sample frequency in Hz\n sample_freq = 10\n\n # 20 in event duration in seconds\n Ts = 60 * 20\n\n # Number of points\n N = Ts * sample_freq\n\n # Array of times\n time = [start_date + i * time_delta for i in range(N)]\n time_str = [t.strftime(\"%Y-%m-%d %H:%M:%S.%f\") for t in time]\n\n ax, ay, Rx, Ry = example_data(sample_freq, Ts)\n\n data = [\n \",\".join([time_str[i], str(ax[i]), str(ay[i]), str(Rx[i]), str(Ry[i])]) for i in range(N)\n ]\n\n data.insert(0, header3)\n data.insert(0, header2)\n data.insert(0, header1)\n\n return \"\\n\".join(data)",
"def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)",
"def job_gen(self, time_frame):",
"def generate(start_date, episodes, steps, output_file):\n header = ','.join(FIELDS) + '\\n'\n with open(output_file, 'w') as fd:\n fd.write(header)\n data_arrays = []\n first_dp = generate_data_point(start_date)\n data_arrays.append(first_dp)\n\n interval = int(1440/steps)\n cur_ts = increment_ts(start_date, interval)\n\n while step_diff(start_date, cur_ts, interval) < steps*episodes:\n dp_tmp = generate_data_point(cur_ts)\n data_arrays.append(dp_tmp)\n cur_ts = increment_ts(cur_ts, interval)\n\n for dp in data_arrays:\n row = ','.join(dp) + '\\n'\n fd.write(row)",
"def on_epoch_begin(self, epoch, logs=None):",
"def on_epoch_begin(self, epoch, logs=None):",
"def read_and_save_timestamps(path, filename=\"saved\", divisor=4):\n osu_dict, wav_file = read_osu_file(path, convert=True)\n data, flow_data = get_map_notes(osu_dict, divisor=divisor)\n timestamps = [c[1] for c in data]\n with open(filename + \"_ts.json\", \"w\") as json_file:\n json.dump(np.array(timestamps).tolist(), json_file)",
"def test_load_events_split(self):\n command = \\\n '{0} -g --min-length 0'.format(\n os.path.join(self.datadir, 'monol_testB.evt'))\n hen.read_events.main(command.split())\n new_filename = os.path.join(self.datadir,\n 'monol_testB_nustar_fpmb_gti000_ev' +\n HEN_FILE_EXTENSION)\n assert os.path.exists(new_filename)",
"def on_train_end(self, logs=None):",
"def on_train_end(self, logs=None):",
"def aer_raw_events_from_file(filename):\n events = aer_load_from_file(filename)\n count = 0\n for ts_mus, x, y, s in events:\n a = np.zeros(dtype=aer_raw_event_dtype, shape=())\n a['timestamp'] = ts_mus / (1000.0 * 1000.0)\n a['x'] = x \n a['y'] = y\n a['sign'] = s\n yield a\n count += 1",
"def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)",
"def split_timeseries_and_save(self, window_length=45, zero_padding=True, tmp_dir=os.path.join(\"..\", \"..\", \"data\", \"interim\")):\n #TODO: split from task event file\n\n label_df = pd.DataFrame(columns=[\"label\", \"filename\"])\n out_file = os.path.join(tmp_dir, \"{}_{:03d}.npy\")\n\n # Split the timeseries\n for ii in range(len(self.valid_ts_filepaths)):\n ts = self.get_valid_timeseries([ii])[0]\n ts_duration = ts.shape[0]\n rem = ts_duration % window_length\n if rem == 0:\n n_splits = int(ts_duration / window_length)\n else:\n if zero_padding:\n n_splits = np.ceil(ts_duration / window_length)\n pad_size = int(n_splits*window_length - ts_duration)\n pad_widths = [(0, pad_size), (0, 0)]\n ts = np.pad(ts, pad_width=pad_widths)\n else:\n ts = ts[:(ts_duration-rem), :]\n n_splits = np.floor(ts_duration / window_length)\n split_ts = np.split(ts, n_splits)\n\n # tmp = [split_timeseries(t,n_timepoints=n_timepoints) for t in timeseries]\n # for ts in tmp:\n # split_ts = split_ts + ts\n\n # #keep track of the corresponding labels\n # n = int(timeseries[0].shape[0]/n_timepoints)\n # split_labels = []\n # for l in labels:\n # split_labels.append(np.repeat(l,n))\n\n # #add a label for each split\n # split_labels.append(list(range(n))*len(timeseries))\n # return split_ts, split_labels",
"def process_events(cat_data, n_run, cfg, sta_locs):\n import time\n import os\n import shutil\n import sys\n import logging\n from obspy import read\n from obspy.geodetics.base import gps2dist_azimuth\n import matplotlib.pyplot as plt\n\n if cfg.output.FORCE_RECALC is True:\n w = open(\"refined_events.dat\", \"w\")\n w.close()\n if cfg.plotting.DO_PLOT_1 is True or cfg.plotting.DO_PLOT_2 is True:\n fig = plt.figure(figsize=(18, 10))\n else:\n fig = []\n # Prepare directory\n if (os.path.exists(\"runs/run{:}\".format(n_run))\n and os.path.isdir(\"runs/run{:}\".format(n_run))):\n shutil.rmtree(\"runs/run{:}\".format(n_run))\n copytree(\"NLLOC_run\", \"runs/run{:}\".format(n_run))\n os.chdir(\"runs/run{:}\".format(n_run))\n for n_ev, ev in enumerate(cat_data):\n start = time.time()\n ev_id = ev.event_descriptions[0].text\n sys.stdout.flush()\n ev_dict = {}\n ev_dict[\"stations\"] = {}\n orig_lat, orig_lon = [ev.origins[0].latitude, ev.origins[0].longitude]\n logging.debug(\"startint logging\")\n st = read(\"../../{:}/{:}/MSEED/*.msd\".format(\n cfg.input.DIR_TO_EVENTDIRS, ev_id), format=\"MSEED\")\n print(n_run, ev_id)\n for n_tr, tr in enumerate(st):\n if st[n_tr].stats.sampling_rate > 40.0:\n try:\n st[n_tr].resample(40)\n except ZeroDivisionError:\n continue\n st1, st2, st_mag = [st.copy(), st.copy(), st.copy()]\n # Append distance to trace\n stations_data = sorted(set([tr.stats.station for tr in st\n if tr.stats.station not in\n cfg.sta_select.STA_BLACKLIST]))\n stations_dist = {sta_code: gps2dist_azimuth(\n sta_locs[sta_code][\"lat\"], sta_locs[sta_code][\"lon\"],\n orig_lat, orig_lon)[0] for sta_code in stations_data\n if gps2dist_azimuth(\n sta_locs[sta_code][\"lat\"], sta_locs[sta_code][\"lon\"],\n orig_lat, orig_lon)[0]/1000 <= cfg.sta_select.MAX_DIST}\n path_to_figs = \"../../{:}/{:}/figs\".format(\n cfg.input.DIR_TO_EVENTDIRS, ev_id)\n if not os.path.exists(path_to_figs):\n os.mkdir(path_to_figs)\n print(\"Doing first refinement\")\n sys.stdout.flush()\n if (\"R\" in cfg.picking.CMPS_REFINE_1[\"S\"] or\n \"T\" in cfg.picking.CMPS_REFINE_1[\"S\"]):\n rot = True\n else:\n rot = False\n evt_refine_1, rms, found = refine_events(\n st1, stations_dist, cfg.picking.CMPS_REFINE_1,\n cfg.picking.MAX_PICK_DIFF_REFINE1, ev,\n cfg.ploting.DO_PLOT_1, 1, fig, \"const\", path_to_figs, ev_dict,\n ev_id, cfg, rot\n )\n if found is False:\n continue\n print(\"RMS = \", rms)\n sys.stdout.flush()\n prev_rms = rms\n print(\"Doing second refinement\")\n sys.stdout.flush()\n if (\"R\" in cfg.picking.CMPS_REFINE_2[\"S\"] or\n \"T\" in cfg.picking.CMPS_REFINE_2[\"S\"]):\n rot = True\n else:\n rot = False\n evt_refine_2, rms, found = refine_events(\n st2, stations_dist, cfg.picking.CMPS_REFINE_2,\n cfg.picking.MAX_PICK_DIFF_REFINE2, evt_refine_1,\n cfg.plotting.DO_PLOT_2, 2, fig, \"dist\", path_to_figs, ev_dict,\n ev_id, rot\n )\n if found is False:\n continue\n print(\"RMS = \", rms)\n if rms > prev_rms * 1.25:\n print(\"RMS is significantly increasing (*25%) - skipping event\")\n continue\n prev_rms = rms\n evt_refine_2 = compute_magnitude(evt_refine_2, st_mag, cfg)\n write_evt(evt_refine_2, ev_id)\n end = time.time()\n print(\"Time taken for event: {:3.1f} mins\".format((end-start)/60))",
"def on_train_begin(self, logs=None):",
"def on_train_begin(self, logs=None):",
"def on_train_begin(self, logs=None):\n self.start_time = datetime.datetime.now()\n print(f\"Starting training at {self.start_time}\")",
"def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()",
"def _save_events_summary(self):\n for name, events in self._events.items():\n dict_events = [event.to_dict() for event in events]\n dump_data(dict_events, self._make_event_filename(name))",
"def train_predict(train_file_arg):\n\n # Load command line arguments\n train_file = train_file_arg\n parameter_file = \"prophet/lstm/trainingConfig.json\"\n\n # Load training parameters\n params = json.loads(open(parameter_file).read())\n\n # Load time series dataset, and split it into train and test\n x_train, y_train, x_test, _, x_test_raw, _,\\\n last_window_raw, last_window, last_datetime_epoch = dataHelper.load_timeseries(train_file, params)\n\n # Build RNN (LSTM) model\n lstm_layer = [1, params[\"window_size\"], params[\"hidden_unit\"], 1]\n model = buildModel.rnn_lstm(lstm_layer, params)\n\n # Train RNN (LSTM) model with train set\n model.fit(\n x_train,\n y_train,\n batch_size=params[\"batch_size\"],\n epochs=params[\"epochs\"],\n validation_split=params[\"validation_split\"])\n\n # Check the model against test set\n predicted = buildModel.predict_next_timestamp(model, x_test)\n predicted_raw = []\n for i in range(len(x_test_raw)):\n predicted_raw.append((predicted[i] + 1) * x_test_raw[i][0])\n\n # Predict next time stamp\n next_timestamp = buildModel.predict_next_timestamp(model, last_window)\n next_timestamp_raw = (next_timestamp[0] + 1) * last_window_raw[0][0]\n print(\"The next time stamp forecasting is: {}\".format(next_timestamp_raw))\n\n # Add 5 minutes for a new timestamp of predictions\n last_datetime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(last_datetime_epoch))\n last_datetime = datetime.strptime(last_datetime, \"%Y-%m-%d %H:%M:%S\")\n new_datetime = last_datetime + timedelta(seconds=300)\n new_datetime_epoch = time.mktime(new_datetime.timetuple())\n new_datetime = new_datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Concatenate datetime and price forecast\n new_entry = \"coin_name,\" + str(new_datetime_epoch) + \",\" \\\n + str(next_timestamp_raw) + \",coin_supply\" + \",coin_mc\" + \"\\n\"\n\n # Write to CSV file of new prediction\n fd = open(train_file, \"a\")\n fd.write(new_entry)\n fd.close()\n\n # Return new prediction\n return [new_datetime, str(next_timestamp_raw)]",
"def MakeFiles(arguments):\n # Unpack arguments\n process, counter, path, start, stop = arguments\n\n log.info(\"[{}] Importing data from {}\".format(process,path))\n hf = h5py.File(path, \"r\")\n\n data = np.empty((0,len(column_names)), float)\n\n # Total number of events in batch\n n_events = stop-start\n\n for i, event in enumerate(np.arange(start,stop)):\n # Print information on progress\n if i%100==0:\n log.info(\"[{}] {} of {} events examined\".format(process,i,n_events))\n\n # Number of muons in event\n nPho = np.shape(hf[ 'pho_truthType' ][ event ])[0]\n\n for pho in range(nPho):\n #log.info(\"[{}] Number of muons is {} \".format(process,nMuo))\n\n data_temp = np.zeros((1,len(column_names)))\n\n\n # Add event variables to array\n data_temp[ 0, column_names.index( 'NvtxReco' ) ] = np.int(hf['NvtxReco'][event])\n data_temp[ 0, column_names.index( 'correctedScaledAverageMu' ) ] = hf[ 'correctedScaledAverageMu' ][ event ]\n data_temp[ 0, column_names.index( 'correctedScaledActualMu' ) ] = hf[ 'correctedScaledActualMu' ][ event ]\n # Add muon variables to array\n\n addPhotonVariables(hf, event, data_temp, pho)\n\n data = np.append(data, data_temp, axis=0)\n\n\n return data",
"def test_1000_populations_with_activity_one_per_cycle():\n\n with path.tempdir() as log_parent_folder:\n log_folder = os.path.join(log_parent_folder, \"logs\")\n\n run_test_scenario_1(clock_step=\"15 min\",\n simulation_duration=\"10days\",\n n_stories=1,\n per=pd.Timedelta(\"1 day\"),\n log_folder=log_folder)\n\n logging.info(\"loading produced logs\")\n logs = load_all_logs(log_folder)[\"the_logs\"]\n\n logging.info(\"number of produced logs: {} logs\".format(logs.shape[0]))\n\n # 10 days of simulation should produce 1000 * 1 * 10 == 10000 logs\n assert 9500 <= logs.shape[0] <= 10500",
"def save_dataset(self):\n if os.path.exists(self.output_path):\n print('Directory already exists. EXITING.')\n sys.exit()\n if not os.path.exists(self.output_path): os.mkdir(self.output_path)\n for window_num, cur_window in enumerate(self.time_windows):\n window_dir = (os.path.join(self.output_path, ('window %s' % str(window_num + 1))))\n if not os.path.exists(window_dir): os.mkdir(window_dir)\n for filepath in cur_window:\n topic = os.path.basename(os.path.dirname(filepath))\n topic_dir = os.path.join(os.path.join(window_dir, topic))\n if not os.path.exists(topic_dir): os.mkdir(topic_dir)\n copy(filepath, topic_dir)\n self.ground_truth.append((len(os.listdir(window_dir))))"
] | [
"0.6032952",
"0.5831476",
"0.57964736",
"0.579341",
"0.56763124",
"0.5651286",
"0.55910707",
"0.5543689",
"0.550672",
"0.5505693",
"0.54488367",
"0.5447712",
"0.5447712",
"0.54450935",
"0.5443203",
"0.5438387",
"0.5438387",
"0.54381704",
"0.54252434",
"0.53706056",
"0.5366133",
"0.5361904",
"0.5361904",
"0.5354834",
"0.53396297",
"0.53329194",
"0.5327918",
"0.5326494",
"0.5315854",
"0.53058153"
] | 0.6412924 | 0 |
Compute an n x n Mandelbrot matrix with at most maxi iterations. | def mandel_numpy(n=400,maxi=512):
# get 2-d arrays for x and y, using numpy's convenience function
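    # (x0, x1, y0, y1 are assumed to be module-level constants giving the bounds of the viewed region)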
xs, ys = N.meshgrid(N.linspace(x0,x1,n), N.linspace(y0,y1,n))
z = N.zeros((n,n),'complex128') # a matrix of complex zeros
c = xs + 1j*ys
escape = N.empty((n,n),'int32')
escape[:,:] = maxi # default result
for i in range(1, maxi):
mask = (escape == maxi) # find out which points have not escaped
# yet (results in a boolean array)
z[mask] = z[mask]**2 + c[mask] # run the Mandelbrot iteration only
# on those points, using boolean indexing
        escape[mask & (N.abs(z) > 2)] = i # if they are just escaping now,
# set the result to this iteration
return escape | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_mandelbrot(self, iterations):\n if self.grid is None:\n raise RuntimeError(\"Grid hasn't been setup - call set_grid first.\")\n # Define the tensorflow variables\n c = tf.constant(self.grid.astype(np.complex64))\n z = tf.Variable(c)\n n = tf.Variable(tf.zeros_like(c, tf.float32))\n # Start the tensorflow session\n with tf.Session():\n tf.global_variables_initializer().run()\n # Define the main mandelbrot algorithm - either take the square plus x, or keep z\n z_out = tf.where(tf.abs(z) < self.threshold, z ** 2 + c, z)\n not_diverged = tf.abs(z_out) < self.threshold\n # Create a group of tensorflow operations\n step = tf.group(z.assign(z_out), n.assign_add(tf.cast(not_diverged, tf.float32)))\n # Run the operations for a set number of steps\n for i in range(iterations):\n step.run()\n self.end_step = n.eval()\n self.end_z = z_out.eval()",
"def mandelbrot(c, max_iter=50, div_lim=2):\n f = 0j\n n = 0\n out = zeros_like(c,dtype=int)\n while n < max_iter:\n n += 1\n f = func(f,c)\n out[(out == 0) & (abs(f) > div_lim)] = n\n f[(out != 0)] = 100\n return(out)",
"def request_mandelbrot_computation(self):\n # Get array bounds\n xmin, xmax = self.mpl_mandelbrot.axes.get_xlim()\n ymin, ymax = self.mpl_mandelbrot.axes.get_ylim()\n # Get the number of iterations from the GUI\n try:\n maxiter = int(self.maxiter_lineedit.text())\n self.maxiter = maxiter\n except ValueError: pass\n # Send info to mandelbrot_thread\n self.mandelbrot_queue.put((xmin, xmax, ymin, ymax, self.maxiter))",
"def mandelbrot(width, height, colors, inf_color, zoom = 2, xshift = 0.5, yshift = 0.0):\n # Create an image array.\n image = np.zeros((height, width, 3), dtype=np.uint8)\n \n m = int(width / 2) # Half image width\n n = int(height / 2) # half image height\n \n mx = 200 # Maximum number of iterations\n clrlen = (len(colors) - 1) / mx # multiplicator for the color mapping\n \n # y is a coordinate on the image plane going from top to bottom.\n for y in range(height):\n # y2 is a coordinate on the Mandelbrot plane going from bottom to top.\n y2 = (n - y) / height * zoom - yshift\n \n # x is a coordinate on the image plane going from left to right.\n for x in range(width):\n # x2 is a coordinate on the Mandelbrot plane going from left to right.\n x2 = (x - m) / height * zoom - xshift\n\n # Reset the Mandelbrot iterations\n zx = 0\n zy = 0\n\n # Precalculations of products for first iteration\n xx = zx * zx\n yy = zy * zy\n xy = zx * zy\n\n # Use infinity color first.\n image[y][x] = inf_color\n \n for c in range(mx):\n zx = xx - yy + x2\n zy = xy + xy + y2\n\n # Precalculations of products for next iteration\n xx = zx * zx\n yy = zy * zy\n xy = zx * zy\n\n # Do the coordinates leave the allowed circle?\n # Pick a color for the number of iterations.\n if xx + yy > 4:\n image[y][x] = colors[int(round(c * clrlen))]\n break\n\n return image",
"def MM(x,N,n,l,t=0):\n Mat = np.zeros([2**l,2**l])\n for iii in range(N):\n Mat[iii,(x**n * iii)%N] = 1\n return Mat",
"def collatz(n):\n memo = dict()\n def collatz_recur(m):\n if m == 1:\n return 1\n # check in the memo\n if m in memo:\n return memo[m]\n\n if m % 2: # odd\n val = 3 * m + 1\n memo[m] = 1 + collatz_recur(val)\n return memo[m]\n else:\n val = m / 2\n memo[m] = 1 + collatz_recur(val)\n return memo[m]\n\n for i in xrange(2,n):\n collatz_recur(i)\n\n return memo",
"def mandelbrot(c):\n tolerance = 30\n threshold = 3\n\n # This is the mandelbrot set\n #Uncomment and comment the Julia set block below\n \n # M = lambda x: pow(x, 2) + c\n # nterm = M(0)\n\n # End Mandelbrot set\n\n # This is the Julia Set\n x = c\n c = complex(-0.4,0.6)\n M = lambda x: pow(x, 2) + c\n nterm = M(x)\n #End Julia Set\n\n for i in xrange(tolerance):\n try:\n nterm = M(nterm)\n except OverflowError:\n return 1\n\n square = lambda x: pow(x, 2)\n try:\n if (square(nterm.real) + square(nterm.imag)) < threshold:\n return 0\n else:\n return 1\n except OverflowError:\n return 1",
"def complex_matrix(n):\n\n matrix = np.zeros(shape=(n, n), dtype=complex)\n\n for row in np.arange(n):\n if row == 0:\n for col in np.arange(n):\n matrix[row, col] = 1\n else:\n for col in np.arange(n):\n if col == 0:\n matrix[row, col] = 1\n else:\n matrix[row, col] = roots(n, col*row)\n\n return matrix",
"def run_Over_Grid(numdemes = 2, reps = 10, numreg = 100, t = 1000):\n Nmean = 2000\n Nsd = 100\n migMean = 0.0001\n migsd = 1e-06\n ndc2 = numdemes * (numdemes - 1) / 2\n rows = ndc2 + numdemes + 1\n I = np.matrix(np.eye(rows))\n Ck = I[0:rows - 1, :]\n Dk = I[rows - 1, :]\n output = []\n for r in xrange(reps):\n N = np.random.normal(Nmean, Nsd, (numdemes,))\n mtemp = np.random.normal(migMean, migsd, (ndc2,))\n xtrue = np.hstack((N, mtemp))\n m = np.zeros((numdemes, numdemes))\n cnt = 0\n for i in xrange(numdemes):\n for j in xrange(i + 1, numdemes):\n m[i, j] = m[j, i] = mtemp[cnt]\n cnt += 1\n\n Ninv = [ 1.0 / x for x in N ]\n Qtrue = comp_pw_coal_cont(m, Ninv)\n Ptrue = expM(t * Qtrue)\n obs_rates = Ck * Ptrue * Dk.T\n if numreg > 0:\n sd_rates = np.real(np.sqrt(obs_rates.getA() * (1 - obs_rates).getA() / numreg))\n noise = np.random.normal(0.0, sd_rates)\n print 'Noise:\\n', noise\n N0 = np.random.normal(Nmean / 2.0, Nsd * 3.0, (numdemes,))\n m0 = np.random.normal(migMean / 2.0, migsd * 3.0, (ndc2,))\n x0 = np.hstack((N0, m0))\n xopt = opt.fmin(compute_Frob_norm_mig, x0, (t, obs_rates), maxfun=1000000, maxiter=100000)\n output.append((xtrue, xopt, linalg.norm(xopt - xtrue)))\n\n return output",
"def integrate_monte_carlo_nd(f, dim, limit, N=1000000):\n I, sum = 1/N, 0\n for n in range(dim):\n I *= (limit[n][1] - limit[n][0])\n\n for k in range(N):\n x = []\n for n in range(dim):\n x += [limit[n][0] + (limit[n][1] - limit[n][0])*rnd.random()]\n\n sum += f(x)\n return I*sum",
"def matrix_generate(n):\n a = np.eye(n)\n max = 0\n for i in range(n):\n for j in range(n):\n a[i][j] = random.randint(0,50)\n a[j][i] = a[i][j]\n if a[i][j] > max:\n max = a[i][j]\n for i in range(n):\n a[i][i] = max * n + random.randint(20,40)\n return np.array(a)",
"def getMandelbrotSet(self):\n\t\t\n\t\t(x_min, x_max, y_min, y_max) = [i for i in self.extent]\n\t\t(rows, columns) = (self.width*self.dpi, self.width*self.dpi)\n\t\tr_real = np.linspace(x_min, x_max, columns)\n\t\tr_imag = np.linspace(y_max, y_min, rows)\n\t\tresult = np.empty((rows, columns))\n\n\t\t# used for smoother colors\n\t\thorizon = 2.0**40\n\t\thorizon2 = horizon**2\n\t\tlog_horizon = np.log(np.log(horizon))/np.log(2)\n\n\t\tfor i in range(rows): # imaginary values\n\t\t\tfor j in range(columns): # real values\n\t\t\t\tresult[i,j] = _determineDivergence(r_real[j], r_imag[i], self.maxIter, horizon2, log_horizon)\n\t\treturn result",
"def get_mat_n(T_e, r_0, M, N, E, g_e, R_h, tmpN_t, tmpN_max):\n mat_n = np.zeros((M, len(T_e))) # matrix for the result\n for i in range(len(T_e)):\n R_e = get_R_e(E, M, g_e, T_e[-i-1]) # matrix with transition rates (env)\n #print get_R_e_test(E, M, g_e, T_e, R_e, 10e-15)\n R = get_R(R_e, R_h) # total transition rates\n data = (R, M, N) # arguments for fsolve \n #-----------solve the nonlinear system of equations-------------------- \n solution = fsolve(func, r_0,args=data, full_output=1)\n if solution[2] == 0: # if sol. didnt conv., repeat calcul.\n print i\n else:\n n1 = get_n1(solution[0],N) # occupation number of the ground state\n n = np.zeros(M) # vector of all occupation numbers\n n[0], n[1:] = n1 , solution[0] \n if np.any(n<0.): # if solution is unphysical \n print \"Needed to repeat calculation at Temperature T_e =\", T_e[-i-1] \n n = get_cor_n(i, T_e, r_0, M, N, E, g_e, R_h, tmpN_t, tmpN_max)\n if n == None:\n print \"Calculation failed! You may choose a larger tmpN_max.\"\n break\n else:\n r_0 = n[1:]\n else:\n r_0 = solution[0]\n mat_n[:,-i-1] = n\n return mat_n",
"def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)",
"def magma_icamax(n, dx, incx, queue):\n\n return _libmagma.magma_icamax(n, int(dx), incx, queue)",
"def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat",
"def make_magic_square(N): # part a\n if N % 2 == 0:\n print('N must be odd.')\n my_magic_square = np.zeros((N, N))\n i = 0\n j = np.ceil(N / 2.).astype(int)\n n = 1\n while n <= N**2:\n my_magic_square[i, j] = n\n n += 1\n i_next =\n j_next =\n if my_magic_square[i_next, j_next] > 0:\n i =\n else:\n i =\n j =\n return my_magic_square",
"def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n i = 3\n x, y, z = 1, 2, 3\n new = 1\n while i < n:\n new = z + (2*y) + (3*x)\n x, y, z = y, z, new \n i += 1\n return new",
"def filled_grid(n):\n\n i = 0\n r, c = 1, 1\n while r * c < n:\n if i % 2:\n r += 1\n else:\n c += 1\n\n i += 1\n\n return r, c",
"def __init__(self, n):\r\n self.size = n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)",
"def ECMFactor(n, sieve, limit, times):\n for _ in range(times):\n g = n\n while g == n:\n curve, point = randomCurve(n)\n g = gcd(n, curve.discriminant().remainder())\n \n if g > 1:\n return g\n\n for prime in sieve:\n prod = prime\n i = 0\n while prod < limit and i < 10:\n i += 1\n try:\n point = prime * point\n except ZeroDivisionError as e:\n return gcd(e.args[1], n)\n prod *= prime\n\n return n",
"def Nmatrix(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], obs[:, 1], obs[:, 2]\n\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v**2.)*(parallax/_A)**2., (sigma_v**2.)*(parallax/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t\n\n\t\n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. \n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See formula A.5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See formula A.7\n\thess = np.zeros((N+4, N+4))\n\n\thess_diag_pi, hess_diag_pi_1, hess_diag_pi_2 = np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_diag_pi_1[:] = invD[0, 0, :]*cprime_pi[0, :]*cprime_pi[0, :] + invD[0, 1, :]*cprime_pi[0, :]*cprime_pi[1, :] + invD[0, 2, :]*cprime_pi[0, :]*cprime_pi[2, :] + \\\n\t\t\t invD[1, 0, :]*cprime_pi[1, :]*cprime_pi[0, :] + invD[1, 1, :]*cprime_pi[1, :]*cprime_pi[1, :] + invD[1, 2, :]*cprime_pi[1, :]*cprime_pi[2, :] + \\\n\t\t \t invD[2, 0, :]*cprime_pi[2, :]*cprime_pi[0, :] + invD[2, 1, :]*cprime_pi[2, :]*cprime_pi[1, :] + invD[2, 2, :]*cprime_pi[2, :]*cprime_pi[2, :]\t\n\t\n\t\n\t#hess_diag_pi_2[:] = np.sum(0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. 
+ invD[2, 2, :]**2.)*de_dpi[:]*de_dpi[:]) ### Check if it's with or without sum: without!\n\t# So correct formula is below.\n\thess_diag_pi_2[:] = (0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dpi[:]*de_dpi[:])\n\thess_diag_pi[:] = hess_diag_pi_1[:] + hess_diag_pi_2[:]\t\n\n\t\n\thess_diag_vx, hess_diag_vy, hess_diag_vz, hess_diag_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_pi_vx, hess_pi_vy, hess_pi_vz, hess_pi_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_diag_vxi, hess_diag_vyi, hess_diag_vzi = np.zeros(N), np.zeros(N), np.zeros(N)\n\t\n\thess_diag_vxi[:] = invD[0, 0, :]*cprime_vx[0, :]*cprime_vx[0, :] + invD[0, 1, :]*cprime_vx[0, :]*cprime_vx[1, :] + invD[0, 2, :]*cprime_vx[0, :]*cprime_vx[2, :] + \\\n\t\t\t invD[1, 0, :]*cprime_vx[1, :]*cprime_vx[0, :] + invD[1, 1, :]*cprime_vx[1, :]*cprime_vx[1, :] + invD[1, 2, :]*cprime_vx[1, :]*cprime_vx[2, :] + \\\n\t\t\t invD[2, 0, :]*cprime_vx[2, :]*cprime_vx[0, :] + invD[2, 1, :]*cprime_vx[2, :]*cprime_vx[1, :] + invD[2, 2, :]*cprime_vx[2, :]*cprime_vx[2, :] \t\t\n\t\n\thess_diag_vyi[:] = invD[0, 0, :]*cprime_vy[0, :]*cprime_vy[0, :] + invD[0, 1, :]*cprime_vy[0, :]*cprime_vy[1, :] + invD[0, 2, :]*cprime_vy[0, :]*cprime_vy[2, :] +\\\n\t\t\t invD[1, 0, :]*cprime_vy[1, :]*cprime_vy[0, :] + invD[1, 1, :]*cprime_vy[1, :]*cprime_vy[1, :] + invD[1, 2, :]*cprime_vy[1, :]*cprime_vy[2, :] +\\\n\t\t\t invD[2, 0, :]*cprime_vy[2, :]*cprime_vy[0, :] + invD[2, 1, :]*cprime_vy[2, :]*cprime_vy[1, :] + invD[2, 2, :]*cprime_vy[2, :]*cprime_vy[2, :] \t\n\n\n\thess_diag_vzi[:] = invD[0, 0, :]*cprime_vz[0, :]*cprime_vz[0, :] + invD[0, 1, :]*cprime_vz[0, :]*cprime_vz[1, :] + invD[0, 2, :]*cprime_vz[0, :]*cprime_vz[2, :] +\\\n\t\t\t invD[1, 0, :]*cprime_vz[1, :]*cprime_vz[0, :] + invD[1, 1, :]*cprime_vz[1, :]*cprime_vz[1, :] + invD[1, 2, :]*cprime_vz[1, :]*cprime_vz[2, :] +\\\n\t\t\t invD[2, 0, :]*cprime_vz[2, :]*cprime_vz[0, :] + invD[2, 1, :]*cprime_vz[2, :]*cprime_vz[1, :] + invD[2, 2, :]*cprime_vz[2, :]*cprime_vz[2, :] \t\t\n\t\n\n\thess_pi_vx[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vx[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vx[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vx[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vx[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vx[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vx[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vx[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vx[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vx[2, :] \n\n\thess_pi_vy[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vy[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vy[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vy[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vy[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vy[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vy[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vy[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vy[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vy[2, :] \n\n\thess_pi_vz[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vz[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vz[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vz[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vz[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vz[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vz[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vz[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vz[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vz[2, :] \n\n\t\t\t\t\t\t\n\thess_diag_vx = np.sum(hess_diag_vxi)\n\thess_diag_vy = np.sum(hess_diag_vyi)\n\thess_diag_vz 
= np.sum(hess_diag_vzi)\t\n\t\n\thess_diag_sigmav = np.sum(0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dsigmav[:]*de_dsigmav[:])\n\thess_pi_sigmav[:] = 0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dpi[:]*de_dsigmav[:] \n\n\thess_diag = np.concatenate((hess_diag_pi, np.array([hess_diag_vx, hess_diag_vy, hess_diag_vz, hess_diag_sigmav])))\n\t\n\tfor i in range(N+4):\n\t\thess[i, i] = hess_diag[i]\n\t\t\n\t\n\tfor j in range(N):\n\t\t\thess[j, -4] = hess_pi_vx[j]\n\t\t\thess[j, -3] = hess_pi_vy[j]\n\t\t\thess[j, -2] = hess_pi_vz[j]\n\t\t\thess[j, -1] = hess_pi_sigmav[j]\n\t\t\thess[-4, j] = hess_pi_vx[j]\n\t\t\thess[-3, j] = hess_pi_vy[j] \n\t\t\thess[-2, j] = hess_pi_vz[j]\n\t\t\thess[-1, j] = hess_pi_sigmav[j]\n\t\t\t\n\n\t\n\t\n\tpart_12, part_13, part_23 = np.zeros(N),np.zeros(N),np.zeros(N)\n\tfor ia in range(3):\n\t\tfor ib in range(3):\n\t\t\tpart_12[:] += invD[ia, ib, :]*cprime_vx[ia, :]*cprime_vy[ib, :] \n\t\t\tpart_13[:] += invD[ia, ib, :]*cprime_vx[ia, :]*cprime_vz[ib, :] \n\t\t\tpart_23[:] += invD[ia, ib, :]*cprime_vy[ia, :]*cprime_vz[ib, :] \t\t\t\t\n\n\thess[-4, -3] = np.sum(part_12)\n\thess[-3, -4] = hess[-4, -3]\n\t\n\thess[-4, -2] = np.sum(part_13)\n\thess[-2, -4] = hess[-4, -2]\n\n\thess[-3, -2] = np.sum(part_23)\n\thess[-2, -3] = hess[-3, -2]\n\n\t#### I am returning here the matrix Njk, which is defined as -E(H),\n\t#### where H is the hessian of the likelihood: therefore to obtain the real hessian, one\n\t#### should multiply this by '-1' (see function below.)\n\treturn hess ### See eq. 18",
"def mr(A, n_iterations, stop=False):\n assert len(A.sizes) == 2\n assert A.sizes[0] == A.sizes[1]\n M = A.same_shape()\n n = A.sizes[0]\n @for_range(n)\n def _(i):\n e = sfix.Array(n)\n e.assign_all(0)\n e[i] = 1\n M[i] = solve_linear(A, e, n_iterations, stop=stop)\n return M.transpose()",
"def generate(n):\n \n m1 = np.zeros((n, n), dtype = int)\n m2 = np.zeros((n, n), dtype = int)\n \n for i in range(n):\n for j in range(n):\n m1[i][j] = (j % 32)\n m2[i][j] = (j % 64)\n \n return m1,m2",
"def compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)",
"def matrix_chain_dynamic(dimensions, n):\n\n m = [[-1 for _ in range(n)] for _ in range(n)]\n s = [[0 for _ in range(n)] for _ in range(n)]\n\n # multiplying matrix by itself\n for i in range(1, n):\n m[i][i] = 0\n\n for length in range(2, n):\n for i in range(1, n - length + 1):\n j = i + length - 1\n for k in range(i, j):\n cost = m[i][k] + m[k + 1][j] + dimensions[i - 1] * dimensions[k] * dimensions[j]\n if cost > m[i][j]:\n m[i][j] = cost\n # index if splitting\n s[i][j] = k\n return m, s",
"def magma_izamax(n, dx, incx, queue):\n\n return _libmagma.magma_izamax(n, int(dx), incx, queue)",
"def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M",
"def all_matrices(n):\n complete = int(n * (n-1) / 2)\n least = (n-1)*2 - 1 # the number of edges is at least 2(n-1)-1\n all_possible_list = [i for i in itertools.product([0, 1], repeat=complete)\n if sum(i) >= least]\n all_mats = [create_matrix(i, n) for i in all_possible_list]\n return all_mats",
"def cgmat(A,x,b,M=None,max_it=100,tol=1e-8):\n if M is None:\n M= sp.diag(A)\n bnrm2 = sp.linalg.norm(b)\n r=b-A.dot(x)\n rho=sp.zeros(max_it)\n for i in range(max_it):\n z=sp.linalg.solve(M,r)\n rho[i] = sp.dot(r,z)\n if i==0:\n p=z\n else:\n beta=rho/rho[i-1]\n p=z+beta*p\n\n q=A.dot(p)\n alpha=rho/sp.dot(p,q)\n x = x+alpha*p\n r = r-alpha*q\n error = sp.linalg.norm( r ) / bnrm2\n if error <tol:\n return (x,error,i,False)\n\n return (x,error,max_it,True)"
] | [
"0.6948238",
"0.6483979",
"0.647635",
"0.60975",
"0.577624",
"0.56804156",
"0.56472903",
"0.56135315",
"0.55913776",
"0.54975516",
"0.5475175",
"0.5427021",
"0.5401388",
"0.5397574",
"0.5332212",
"0.53113306",
"0.5310061",
"0.526007",
"0.52369773",
"0.5234702",
"0.5232932",
"0.523096",
"0.52242684",
"0.52240026",
"0.52226496",
"0.52179766",
"0.521545",
"0.51903176",
"0.5187259",
"0.5185672"
] | 0.6586663 | 1 |
Flattens each 2D detector layer into a 1D array | def flatten_layers(data):
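    # collapse each 2D detector layer (the trailing axes) into one flat dimension per layer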
return data.reshape((data.shape[0], data.shape[1], -1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)",
"def flattenImage(input_array):\r\n shp = np.size(input_array)\r\n return np.reshape(input_array, (shp,))",
"def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))",
"def flatten_image(x):\n *batch_shape, h, w, c = x.shape\n return x.reshape((*batch_shape, h * w * c))",
"def flatten_numpy(ndarray):\n return np.reshape(ndarray, (-1,), 'F')",
"def flatten(X):\n N = X.shape[-1]\n flat = np.zeros((N, 3072))\n for idx, i in enumerate(range(N)):\n # if not idx:\n # print(X[:,:,:,i].reshape(3072))\n flat[i] = X[:,:,:,i].reshape(3072)\n return flat",
"def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images",
"def flatten_layers(gradient):\n X_flat = np.array([])\n for name,layer in sorted(gradient.items()):\n cur = np.array(layer,dtype=np.float64).flatten()\n X_flat = np.concatenate((X_flat,cur),axis=0)\n return X_flat",
"def FlattenModelData(y, i):\n outs = np.array([y[j][i][0] for j in range(len(y))])\n return outs",
"def _convert_to_features(self, img: np.ndarray) -> np.ndarray:",
"def image_to_feature_vector(raw_tensor):\n result = []\n for tensor in raw_tensor:\n result.append(tensor.flatten())\n return result",
"def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t",
"def flatten_array(self):\n numel = self.xyz_array[:, :, 0].size # Number of elements in dataset\n self.flat_array = np.zeros([self._len_z, numel]) # Create array to hold flattened array\n\n # Loop through each dimension (dataset) and flatten it into new array\n for dim in range(self._len_z):\n self.flat_array[dim, :] = np.ravel(self.xyz_array[:, :, dim])",
"def get_all_layers(self, img): # noqa\n s1_out, c1_out, s2_out, c2_out = self.run_all_layers(img)\n return (\n [s1.cpu().detach().numpy() for s1 in s1_out],\n [c1.cpu().detach().numpy() for c1 in c1_out],\n [[s2_.cpu().detach().numpy() for s2_ in s2] for s2 in s2_out],\n [c2.cpu().detach().numpy() for c2 in c2_out],\n )",
"def extract_feat(self, img):\n xb = self.backbone(img)\n if self.with_neck:\n xn = self.neck(xb)\n #for xx in xb:\n # print(xx.shape)\n # print(xb[2].shape)\n return [xb[2]], xn",
"def detection_collate(batch):\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets",
"def _convert_raw_outputs(self, raw_output):\n outputs = [\n np.array(raw_output.getLayerFp16(self._output_layers[i]),\n dtype=np.float32).reshape((1, -1) + self._output_shape)\n for i in range(len(self._output_layers))\n ]\n return outputs",
"def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)",
"def flatten_layer(inputs):\n return Spectrum(\n connections=[input_ for depth_slice in inputs\n for row in depth_slice\n for input_ in row]\n )",
"def flatten_channel(utt_feats):\n assert(isinstance(utt_feats, np.ndarray) and utt_feats.ndim==3)\n return np.concatenate(utt_feats, axis=1)",
"def flatten(self):\n return DataArray([s for s in self.unstructured()])",
"def convert_image_to_1d_array(x):\r\n\r\n #x = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\r\n if x is None:\r\n print( \"ERROR: loading image ' + img + ' failed.\")\r\n return None\r\n \r\n x = cv2.threshold(x, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n if x is None:\r\n print (\"ERROR: thresholding image ' + img + ' failed.\")\r\n return None\r\n\r\n return x.flatten()",
"def test_unstack2():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = 2\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def flatten_data(X):\n\n return X.reshape((-1, X.shape[-1]))",
"def patch2X(patch): #X\n return patch.reshape(-1)",
"def _flatten_batch(self, matrix_tups):\n out_vecs = []\n for t in matrix_tups:\n for v in t:\n new_shape = (v.shape[0],)\n if len(v.shape) > 1:\n new_shape = new_shape + (np.prod(v.shape[1:]),)\n out_vecs.append(v.reshape(new_shape))\n return jnp.concatenate(out_vecs, axis=1)",
"def flatten():",
"def array(self):\n aa = list(map(np.asarray, self.loader_array.flat))\n return np.stack(aa, axis=0).reshape(self.output_shape)",
"def flatten_image(data):\n\t# print(img.shape[0])\n\t# print(img.shape[1])\n\t# cv2.imshow('image',img)\n\t# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# plt.imshow(gray)\n\t# plt.show()\n\t# X_normalized = preprocessing.normalize(img, norm='l2')\n\t\n\t# s = img.shape[0] * img.shape[1]\n\t# img_wide = img.reshape((1, s,-1))\t\n\t# img_wide = np.rollaxis(X_normalized, axis=1, start=0)\n\t# plt.imshow(img_wide[0])\n\t# plt.show()\n\t# print(X_normalized)\n\tnsamples, nx, ny = data.shape\n\td2_train_dataset = data.reshape((nsamples,nx*ny))\n\treturn d2_train_dataset",
"def _flatten(params):\n params, _ = tree_flatten(params)\n return jnp.concatenate([jnp.reshape(param, [-1]) for param in params])"
] | [
"0.65512943",
"0.63869417",
"0.6224323",
"0.62073267",
"0.6146212",
"0.61214846",
"0.60448676",
"0.6012312",
"0.6004641",
"0.59888744",
"0.5885316",
"0.587274",
"0.58667505",
"0.58584476",
"0.5855942",
"0.58501977",
"0.58499795",
"0.5836353",
"0.5810114",
"0.57356036",
"0.5725367",
"0.5718183",
"0.5717675",
"0.57041115",
"0.57003593",
"0.5693622",
"0.56907684",
"0.56858444",
"0.5683697",
"0.56831837"
] | 0.6616059 | 0 |
Allow dumping the packed files to a folder. Returns a zipfile.write() method. | def get_zip_writer(zipfile: ZipFile):
dump_folder = CONF['packfile_dump', '']
if not dump_folder:
return zipfile.write
dump_folder = os.path.abspath(dump_folder)
# Delete files in the folder, but don't delete the folder itself.
try:
dump_files = os.listdir(dump_folder)
except FileNotFoundError:
pass
else:
for name in dump_files:
name = os.path.join(dump_folder, name)
if os.path.isdir(name):
shutil.rmtree(name)
else:
os.remove(name)
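    # Wrapper that mirrors each packed file into the dump folder before writing it to the zip.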
def write_to_zip(filename, arcname):
dump_loc = os.path.join(dump_folder, arcname)
os.makedirs(os.path.dirname(dump_loc), exist_ok=True)
shutil.copy(filename, dump_loc)
zipfile.write(filename, arcname)
return write_to_zip | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pack_file(zip_write, filename: str, suppress_error=False):\n if '\\t' in filename:\n # We want to rename the file!\n filename, arcname = filename.split('\\t')\n else:\n arcname = filename\n\n if filename[-1] == '*':\n # Pack a whole folder (blah/blah/*)\n directory = filename[:-1]\n file_count = 0\n for poss_path in RES_ROOT:\n dir_path = os.path.normpath(\n os.path.join(poss_path, directory)\n )\n if not os.path.isdir(dir_path):\n continue\n for subfile in os.listdir(dir_path):\n full_path = os.path.join(dir_path, subfile)\n rel_path = os.path.join(directory, subfile)\n zip_write(\n filename=full_path,\n arcname=rel_path,\n )\n file_count += 1\n LOGGER.info('Packed {} files from folder \"{}\"', file_count, directory)\n return\n\n for poss_path in RES_ROOT:\n full_path = os.path.normpath(\n os.path.join(poss_path, filename)\n )\n if os.path.isfile(full_path):\n zip_write(\n filename=full_path,\n arcname=arcname,\n )\n break\n else:\n if not suppress_error:\n LOGGER.warning(\n '\"bee2/' + filename + '\" not found! (May be OK if not custom)'\n )",
"def zip_data(self) -> None:\n zipf = zipfile.ZipFile('output.zip', 'w', zipfile.ZIP_DEFLATED)\n self._zipdir(self.path, zipf)\n zipf.close()",
"def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)",
"def zipdata(filename: str) -> None:\n\n # Generate the path to the project TODO: check if this is entire project or server\n directoryName = ROOT.split(\"/\")[-3]\n codeDestination = \"/\".join(ROOT.split(\"/\")[:-2])\n\n # Create the output file\n zippedFile = zipfile.ZipFile(filename, \"w\", compression=zipfile.ZIP_DEFLATED)\n\n # Walk over the directory and save all files\n for abspath, dirnames, filenames in os.walk(codeDestination):\n local = abspath[abspath.index(directoryName):]\n [zippedFile.write(os.path.join(abspath, name), os.path.join(local, name)) for name in filenames]\n\n # Close the zip file\n zippedFile.close()",
"def zip_output(directory):\n #directory = client_variables.output_zip_folder\n #create the zip archive\n zip = zipfile.ZipFile('outputs.zip', 'w')\n\n # add all files in specified folder\n for name in glob.glob(directory + '\\\\*'):\n zip.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)\n zip.close()",
"def zipdir(path, file_name):\n length = len(path)\n zipf = zipfile.ZipFile('output/'+f'Test_{file_name}.pptx', 'w', zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk(path):\n folder = root[length:] # path without \"parent\"\n for file in files:\n zipf.write(os.path.join(root, file), os.path.join(folder, file))\n zipf.close()\n return",
"def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()",
"def make_zip_file(file_folder_path,file_name_list,output_file):\n ffp = file_folder_path\n if ffp is None:\n ffp = \"\"\n else:\n ffp += \"/\"\n with zipfile.ZipFile(output_file, 'w') as zf:\n for file_name in file_name_list:\n fpath = ffp + str(file_name)\n if not os.path.isfile(fpath):\n continue\n file_data = open(fpath,'r').read() \n data = zipfile.ZipInfo(file_name)\n data.compress_type = zipfile.ZIP_DEFLATED\n zf.writestr(data, file_data)",
"def backupToZip(folder):\n\n folder = os.path.abspath(folder) #Ensure we're using the absolute path\n number = 1\n\n while True:\n zipFilename = os.path.basename(folder) + '_' + str(number) + '.zip'\n if not os.path.exists(zipFilename):\n break\n number += 1\n\n #Create the zip file\n print('Creating %s...' % (zipFilename))\n backupZip = zipfile.ZipFile(zipFilename,'w')\n\n #Walk the directory tree and compress the files in each folder\n for foldername, subfolders, filenames in os.walk(folder):\n print('Adding files in %s...' % (foldername))\n\n #Add current folder to the zip file\n backupZip.write(foldername)\n\n #Add all files in this folder to the zip file\n for file in filenames:\n newBase = os.path.basename(folder) + '_'\n if file.startswith(newBase) and file.endswith('zip'):\n continue #Don't back up zip files\n backupZip.write(os.path.join(foldername, file))\n backupZip.close()\n print('Done.')",
"def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)",
"def pack_zip(output_filename, sources):\n previous_dir = os.getcwd()\n if not isinstance(sources, (list, tuple)) and \\\n isinstance(sources, str):\n sources = [sources]\n zip_ds = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)\n for source in sources:\n os.chdir(os.path.dirname(source))\n if os.path.isdir(source):\n for root, dirs, files in os.walk(os.path.basename(source)):\n for file in files:\n zip_ds.write(os.path.join(root, file))\n else:\n zip_ds.write(os.path.basename(source))\n zip_ds.close()\n os.chdir(previous_dir)",
"def zip_folder(source_path, destination_path, password):\n\n source_path = os.path.abspath(source_path)\n\n if not destination_path:\n destination_path = source_path + \".zip\"\n\n if not destination_path.endswith(\".zip\"):\n destination_path += \".zip\"\n\n try:\n parent_folder = os.path.dirname(source_path)\n contents = os.walk(source_path)\n\n if password:\n z = pyzipper.AESZipFile(destination_path + \"\\\\\", 'w', compression=pyzipper.ZIP_LZMA, encryption=pyzipper.WZ_AES)\n z.setpassword(password)\n else:\n z = pyzipper.ZipFile(destination_path + \"\\\\\", 'w', compression=pyzipper.ZIP_LZMA)\n\n try:\n for root, folders, files in contents:\n # Include all subfolders, including empty ones.\n for folder_name in folders:\n absolute_path = os.path.join(root, folder_name)\n relative_path = absolute_path.replace(parent_folder + '\\\\', '')\n print(f\"Adding {absolute_path} to archive.\")\n z.write(absolute_path, relative_path)\n for file_name in files:\n absolute_path = os.path.join(root, file_name)\n relative_path = absolute_path.replace(parent_folder + '\\\\', '')\n print(f\"Adding {absolute_path} to archive.\")\n z.write(absolute_path, relative_path)\n print(f\"{destination_path} created successfully.\")\n\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)\n\n finally:\n z.close()\n\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)",
"def zip_alg_file(task_id):\n start_dir = os.path.join(FILE_PATH, \"task\", task_id)\n res = None\n if os.path.exists(start_dir):\n zip_file_dir = os.path.join(FILE_PATH, \"task\", task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n for dir_path, _, file_names in os.walk(start_dir):\n for file_name in file_names:\n file.write(os.path.join(dir_path, file_name))\n file.close()\n res = zip_file_dir\n return res",
"def create_bagit_stream(dir_name, payload_info_list):\n zip_file = zipstream.ZipFile(mode=\"w\", compression=zipstream.ZIP_DEFLATED)\n _add_path(dir_name, payload_info_list)\n payload_byte_count, payload_file_count = _add_payload_files(\n zip_file, payload_info_list\n )\n tag_info_list = _add_tag_files(\n zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count\n )\n _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)\n _add_tag_manifest_file(zip_file, dir_name, tag_info_list)\n return zip_file",
"def zip_folder(folder_path, output_path):\n\n # Note: os.path.relpath() does not exist in Jython.\n # target = os.path.relpath(folder_path, start=os.path.dirname(folder_path))\n target = folder_path[folder_path.rfind(os.sep) + 1:]\n\n # Simple trick to build relative paths\n root_len = folder_path.find(target)\n\n try:\n\n # Open zip file (no compression)\n zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_STORED, allowZip64=True)\n\n # Now recurse into the folder\n for root, folders, files in os.walk(folder_path):\n\n # We do not process folders. This is only useful to store empty\n # folders to the archive, but 1) jython's zipfile implementation\n # throws:\n #\n # Exception: [Errno 21] Is a directory <directory_name>\n #\n # when trying to write a directory to a zip file (in contrast to \n # Python's implementation) and 2) oBIT does not export empty\n # folders in the first place.\n\n # Build the relative directory path (current root)\n relative_dir_path = os.path.abspath(root)[root_len:]\n\n # If a folder only contains a subfolder, we disrupt the hierarchy,\n # unless we add a file.\n if len(files) == 0:\n touch(os.path.join(root, '~'))\n files.append('~')\n\n # Include all files\n for file_name in files:\n\n # Full file path to add\n full_file_path = os.path.join(root, file_name)\n relative_file_path = os.path.join(relative_dir_path, file_name)\n\n # Workaround problem with file name encoding\n full_file_path = full_file_path.encode('latin-1')\n relative_file_path = relative_file_path.encode('latin-1')\n\n # Write to zip\n zip_file.write(full_file_path, relative_file_path, \\\n zipfile.ZIP_STORED)\n\n except IOError, message:\n raise Exception(message)\n\n except OSError, message:\n raise Exception(message)\n\n except zipfile.BadZipfile, message:\n raise Exception(message)\n\n finally:\n zip_file.close()",
"def compressIfNeeded(self):\n\n if self._mode == \"zip\":\n zip_folder(self._rootExportPath, self.getZipArchiveFullPath())",
"def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))",
"def compress_files(self):\n archive_file_path = tkinter.filedialog.asksaveasfilename(parent=self,\n defaultextension=\".zip\",\n filetypes=[(\"Zip File\", \"*.zip\")])\n treeview_items = self.files_treeview.get_children()\n if archive_file_path and treeview_items:\n with ZipFile(archive_file_path, \"w\", ZIP_DEFLATED) as archive:\n for row in treeview_items:\n file_path = self.files_treeview.item(row, \"values\")[0]\n file_name = os.path.basename(file_path)\n archive.write(file_path, arcname=file_name)",
"def make_zipfile(output_filename, source_dir):\n import zipfile, zlib\n relroot = os.path.abspath(os.path.join(source_dir, os.pardir))\n with zipfile.ZipFile(output_filename, \"w\", zipfile.ZIP_DEFLATED, allowZip64) as zip:\n for root, dirs, files in os.walk(source_dir):\n # add directory (needed for empty dirs)\n zip.write(root, os.path.relpath(root, relroot))\n for file in files:\n filename = os.path.join(root, file)\n if os.path.isfile(filename): # regular files only\n arcname = os.path.join(os.path.relpath(root, relroot), file)\n zip.write(filename, arcname)",
"def toZip(self, file, zip_location):\n zip_file = zipfile.ZipFile(zip_location, 'w')\n if os.path.isfile(file):\n zip_file.write(file)\n else:\n self.__addFolderToZip(zip_file, file)\n print \"Wrote %s to %s\"%(file,zip_location)\n zip_file.close()",
"def zip_package(paths: List[Path], fp, compression=zipfile.ZIP_DEFLATED):\n\n with zipfile.ZipFile(\n file=fp, mode=\"w\", compression=compression, compresslevel=9\n ) as z:\n for path in paths:\n (local_path, zip_path) = path\n z.write(filename=str(path[0]), arcname=str(path[1]))",
"def zipdir(path, ziph):\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file),\n arcname=os.path.join(os.path.relpath(root, path), file))",
"def write_zip_vanilla(zip, to_leave_vanilla):\n for from_file in to_leave_vanilla:\n with open(from_file) as file:\n contents = file.read()\n zip.writestr(os.path.join('data/minecraft/', from_file), contents)",
"def handle_package(self, prime_dir, bases_config: BasesConfiguration):\n emit.progress(\"Creating the package itself\")\n zipname = format_charm_file_name(self.config.name, bases_config)\n zipfh = zipfile.ZipFile(zipname, \"w\", zipfile.ZIP_DEFLATED)\n for dirpath, _dirnames, filenames in os.walk(prime_dir, followlinks=True):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n filepath = dirpath / filename\n zipfh.write(str(filepath), str(filepath.relative_to(prime_dir)))\n\n zipfh.close()\n return zipname",
"def zipfolder(zip_filename, folder, directory):\n with ZipFile(os.path.join(directory, f'{zip_filename}.zip'), 'w') as zipObj:\n for folderName, subfolders, filenames in os.walk(folder):\n for filename in filenames:\n filePath = os.path.join(folderName, filename)\n zipObj.write(filePath, os.path.relpath(filePath, folder))",
"def save_zip(zip_file, archive_dir):\n zipdata = zipfile.ZipFile(zip_file, mode='w')\n\n for root, dirs, files in os.walk(archive_dir):\n for name in files:\n fname = os.path.join(root, name)\n zipdata.write(fname)\n zipdata.close()",
"def zip_data_file(task_id, task_name, data_path):\n zip_file_dir = os.path.join(FILE_PATH, task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n sample_path = os.path.join(data_path, \"datasets\", str(task_id) + \"_\" + task_name + \".csv\")\n true_dag_path = os.path.join(data_path, \"true\", str(task_id) + \"_\" + task_name + \".npz\")\n file.write(sample_path)\n file.write(true_dag_path)\n file.close()\n return zip_file_dir",
"def _compress_folder() -> str:\n\n file_path = pathlib.Path.cwd().parent / TAR_NAME\n tar = tarfile.open(str(file_path), \"w\")\n\n # this string contained in the path? exclude\n excluded = ['loaded_from_sacred', '.git', '.idea', '.tar', '__pycache__', '.DS_Store', '.pytest_cache', 'blogpost']\n\n def filter_function(tarinfo):\n for ex in excluded:\n if ex in tarinfo.name:\n return None\n else:\n return tarinfo\n\n folder_name = pathlib.Path.cwd()\n\n print(f'Compressing {pathlib.Path.cwd()} to {file_path} ')\n tar.add(folder_name, recursive=True, filter=filter_function, arcname=folder_name.parts[-1])\n tar.close()\n return folder_name.stem",
"def zip_dir(in_dir, out_loc, ext=None):\n zipf = zipfile.ZipFile(out_loc, \"w\", zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk(in_dir):\n for f in files:\n if ext is not None:\n if os.path.splitext(f)[-1] == ext:\n zipf.write(os.path.join(root, f), os.path.relpath(\n os.path.join(root, f), os.path.join(out_loc, '..')))\n else:\n zipf.write(os.path.join(root, f), os.path.relpath(\n os.path.join(root, f), os.path.join(out_loc, '..')))\n zipf.close()\n print(\"Wrote zip file with source code to {}\".format(out_loc))",
"def make_zipfile(output_filename, source_dir):\n relroot = os.path.abspath(os.path.join(source_dir, os.pardir))\n with zipfile.ZipFile(output_filename, \"w\", zipfile.ZIP_DEFLATED) as zip:\n for root, dirs, files in os.walk(source_dir):\n # add directory (needed for empty dirs)\n zip.write(root, os.path.relpath(root, relroot))\n for file in files:\n filename = os.path.join(root, file)\n if os.path.isfile(filename): # regular files only\n arcname = os.path.join(os.path.relpath(root, relroot), file)\n zip.write(filename, arcname)"
] | [
"0.7331823",
"0.7246399",
"0.70958483",
"0.7027812",
"0.6948637",
"0.6911189",
"0.68038255",
"0.6792268",
"0.6757164",
"0.6726353",
"0.6699952",
"0.6696708",
"0.66505677",
"0.6580177",
"0.65668654",
"0.65554553",
"0.65272367",
"0.6514948",
"0.650231",
"0.6482944",
"0.64609164",
"0.645763",
"0.64573014",
"0.6445027",
"0.64423627",
"0.64221835",
"0.6405357",
"0.6387674",
"0.6384316",
"0.6383584"
] | 0.7529858 | 0 |
Generate a new game_sounds_manifest.txt file. This includes all the current scripts defined, plus any custom ones. Excludes is a list of scripts to remove from the listing; this allows overriding the sounds without VPK overrides. | def gen_sound_manifest(additional, excludes):
if not additional:
return # Don't pack, there aren't any new sounds..
orig_manifest = os.path.join(
'..',
SOUND_MAN_FOLDER.get(CONF['game_id', ''], 'portal2'),
'scripts',
'game_sounds_manifest.txt',
)
try:
with open(orig_manifest) as f:
props = Property.parse(f, orig_manifest).find_key(
'game_sounds_manifest', [],
)
except FileNotFoundError: # Assume no sounds
props = Property('game_sounds_manifest', [])
scripts = [prop.value for prop in props.find_all('precache_file')]
for script in additional:
scripts.append(script)
for script in excludes:
try:
scripts.remove(script)
except ValueError:
LOGGER.warning(
'"{}" should be excluded, but it\'s'
' not in the manifest already!',
script,
)
# Build and unbuild it to strip other things out - Valve includes a bogus
# 'new_sound_scripts_must_go_below_here' entry..
new_props = Property('game_sounds_manifest', [
Property('precache_file', file)
for file in scripts
])
inject_loc = os.path.join('bee2', 'inject', 'soundscript_manifest.txt')
with open(inject_loc, 'w') as f:
for line in new_props.export():
f.write(line)
LOGGER.info('Written new soundscripts_manifest..') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_sounds(names, path, base_label='Sound_'):\n\tfor filename, output in dump_sounds(names, base_label):\n\t\twith open(os.path.join(path, filename), 'w') as out:\n\t\t\tout.write(output)",
"def add_sounds(self) -> None:\n self.sounds.append(arcade.Sound(\"sounds/minecraft-theme.mp3\"))\n self.sounds.append(arcade.Sound(\"sounds/starcraft-theme.mp3\"))\n self.sounds.append(arcade.Sound(\"sounds/player_attack.mp3\"))",
"def generate_music_script(data: Property, pack_list):\n # We also pack the filenames used for the tracks - that way funnel etc\n # only get packed when needed. Stock sounds are in VPKS or in aperturetag/,\n # we don't check there.\n # The voice attrs used in the map - we can skip tracks\n voice_attr = CONF['VoiceAttr', ''].casefold().split(';')\n\n funnel = data.find_key('tbeam', '')\n bounce = data.find_key('bouncegel', '')\n speed = data.find_key('speedgel', '')\n\n # The sounds must be present, and the items should be in the map.\n has_funnel = funnel.value and (\n 'funnel' in voice_attr or\n 'excursionfunnel' in voice_attr\n )\n has_bounce = bounce.value and (\n 'bouncegel' in voice_attr or\n 'bluegel' in voice_attr\n )\n # Speed-gel sounds also play when flinging, so keep it always.\n\n with open(os.path.join('bee2', 'inject', 'music_script.txt'), 'w') as file:\n # Write the base music track\n file.write(MUSIC_START.format(name='', vol='1'))\n write_sound(file, data.find_key('base'), pack_list, snd_prefix='#*')\n file.write(MUSIC_BASE)\n # The 'soundoperators' section is still open now.\n\n # Add the operators to play the auxilluary sounds..\n if has_funnel:\n file.write(MUSIC_FUNNEL_MAIN)\n if has_bounce:\n file.write(MUSIC_GEL_BOUNCE_MAIN)\n if speed.value:\n file.write(MUSIC_GEL_SPEED_MAIN)\n\n # End the main sound block\n file.write(MUSIC_END)\n\n if has_funnel:\n # Write the 'music.BEE2_funnel' sound entry\n file.write('\\n')\n file.write(MUSIC_START.format(name='_funnel', vol='1'))\n write_sound(file, funnel, pack_list, snd_prefix='*')\n # Some tracks want the funnel music to sync with the normal\n # track, others randomly choose a start.\n file.write(\n MUSIC_FUNNEL_SYNC_STACK\n if data.bool('sync_funnel') else\n MUSIC_FUNNEL_RAND_STACK\n )\n file.write(MUSIC_FUNNEL_UPDATE_STACK)\n\n if has_bounce:\n file.write('\\n')\n file.write(MUSIC_START.format(name='_gel_bounce', vol='0.5'))\n write_sound(file, bounce, pack_list, snd_prefix='*')\n # Fade in fast (we never get false positives, but fade out slow\n # since this disables when falling back..\n file.write(MUSIC_GEL_STACK.format(fadein=0.25, fadeout=1.5))\n\n if speed.value:\n file.write('\\n')\n file.write(MUSIC_START.format(name='_gel_speed', vol='0.5'))\n write_sound(file, speed, pack_list, snd_prefix='*')\n # We need to shut off the sound fast, so portals don't confuse it.\n # Fade in slow so it doesn't make much sound (and also as we get\n # up to speed). We stop almost immediately on gel too.\n file.write(MUSIC_GEL_STACK.format(fadein=0.5, fadeout=0.1))",
"def dump_sounds(names, base_label='Sound_'):\n\tpointer_length = 2\n\tpointer_address = 0x4ca2\n\t# sfx: pointer_address = 0x63ce\n\n\taddresses = []\n\tfor i, name in enumerate(names):\n\t\tsong_index, bank = get_song_bank(i)\n\t\taddress = read_address_pointer(\n\t\t\t(bank - 1) * 0x4000 +\n\t\t\tpointer_address +\n\t\t\tsong_index * pointer_length\n\t\t)\n\t\taddresses += [address]\n\n\t# Do an extra pass to grab labels from each song.\n\t# There's no getting around this since the\n\t# Graveyard themes share labels.\n\n\tsounds = {}\n\tall_labels = []\n\tfor name, address in zip(names, addresses):\n\t\tsound = Sound(address, base_label + name)\n\t\tsounds[name] = sound\n\t\tall_labels += sound.labels\n\n\toutputs = []\n\tfor name, address in zip(names, addresses):\n\t\tsound = sounds[name]\n\t\toutput = sound.to_asm(all_labels) + '\\n'\n\t\tfilename = name.lower() + '.asm'\n\t\toutputs += [(filename, output)]\n\n\treturn outputs",
"def gen_part_manifest(additional):\n if not additional:\n return # Don't pack, there aren't any new particles..\n\n orig_manifest = os.path.join(\n '..',\n GAME_FOLDER.get(CONF['game_id', ''], 'portal2'),\n 'particles',\n 'particles_manifest.txt',\n )\n\n try:\n with open(orig_manifest) as f:\n props = Property.parse(f, orig_manifest).find_key(\n 'particles_manifest', [],\n )\n except FileNotFoundError: # Assume no particles\n props = Property('particles_manifest', [])\n\n parts = [prop.value for prop in props.find_all('file')]\n\n for particle in additional:\n parts.append(particle)\n\n # Build and unbuild it to strip comments and similar lines.\n new_props = Property('particles_manifest', [\n Property('file', file)\n for file in parts\n ])\n\n inject_loc = os.path.join('bee2', 'inject', 'particles_manifest.txt')\n with open(inject_loc, 'w') as f:\n for line in new_props.export():\n f.write(line)\n\n LOGGER.info('Written new particles_manifest..')",
"def create_mp3():\n\n #TODO: les roles ne devraient pas etre en dur\n list_all_roles = [\n [],\n [\"morgan\"],\n [\"oberon\"],\n [\"mordred\"],\n [\"morgan\", \"oberon\"],\n [\"morgan\", \"mordred\"],\n [\"oberon\", \"mordred\"],\n [\"morgan\", \"oberon\", \"mordred\"]\n ]\n\n for list_roles in list_all_roles:\n\n list_mp3 = [\"init.mp3\", \"serv_mord.mp3\"]\n if \"oberon\" in list_roles:\n list_mp3.append(\"oberon.mp3\")\n list_mp3.append(\"red_identi.mp3\")\n\n if \"morgan\" in list_roles:\n list_mp3.append(\"add_per_mor.mp3\")\n\n list_mp3.append(\"serv_mord.mp3\")\n if \"mordred\" in list_roles:\n list_mp3.append(\"mordred.mp3\")\n list_mp3.extend([\"merlin_identi.mp3\", \"end.mp3\"])\n\n mp3_combined = AudioSegment.empty()\n for mp3 in list_mp3:\n mp3_combined += AudioSegment.from_mp3(\"resources/{}\".format(mp3))\n\n mp3_combined.export(\"resources/_{}.mp3\".format('-'.join(sorted(list_roles))), format=\"mp3\")",
"def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')",
"def write_sound(file, snds: Property, pack_list, snd_prefix='*'):\n if snds.has_children():\n file.write('\"rndwave\"\\n\\t{\\n')\n for snd in snds:\n file.write(\n '\\t\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snd.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snd.value.casefold())\n file.write('\\t}\\n')\n else:\n file.write(\n '\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snds.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snds.value.casefold())",
"def add_spe_to_blender(self):\n from distutils.file_util import copy_file \n import Blender\n #important local variables\n #\n srcdir = info.PATH #_spe directory\n dstdir = Blender.Get('uscriptsdir') #preferred Blender script directory (can be '')\n altdir = Blender.Get('scriptsdir') #the other Blender script directory \n #'uscriptsdir' can be empty - in such case use 'scriptsdir': \n if not dstdir: dstdir, altdir = altdir, None\n #\n #2. Main operation: try to update the *.py file at dstdir,\n # optionally remove eventual old location from altdir:\n #\n cpyresult = rmresult = mresult = \"\" #helpers for message fromatting\n for fname in (BLENDER_SHORTCUT_SPE,BLENDER_SHORTCUT_WINPDB):\n src = os.path.join(srcdir,fname)\n result = copy_file(src, os.path.join(dstdir,fname),update=1)\n if result[1]: #copied! \n cpyresult += \", \" + fname #if suceeded: add fname to the message\n #\n #if we have copied fname with success - there should not be \n # two fname scripts (one for every Blender scripts directory): \n # try to remove the unwanted one from the altdir (Blender 'scriptsdir')\n #\n if altdir and os.access(altdir,os.W_OK): \n try: #let's try to remove it from unused dir:\n os.remove(os.path.join(altdir, fname)) \n rmresult += \", \" + fname #OK, succeed: add fname to the message\n except:\n pass #just continue - it is not a big problem\n #\n #3. Update Blender:\n #\n Blender.UpdateMenus()\n #\n #4. Final message to the user:\n #\n #([2:] is used in strings to discard leading \", \"):\n msg = \"Blender menu updated.\\n\\n\"\n if cpyresult: msg+= \"Copied %s to %s.\\n\\n\" % (cpyresult[2:], dstdir)\n if rmresult: msg+= \"Removed %s from %s. \" % (rmresult[2:], altdir)\n self.message(msg)\n #self.SetStatusText(msg,1)",
"def __load_all_sounds(sounds_dict, directory, accept=('.ogg')):\r\n for sound in os.listdir(directory):\r\n name, ext = os.path.splitext(sound)\r\n if ext.lower() in accept:\r\n sounds_dict[name] = pygame.mixer.Sound(os.path.join(directory, sound))",
"def _create_manifest(self, templates_dir, static_dir):\n return \"\"\"\n graft %(templates_dir)s\n graft %(static_dir)s\n\n include COPYING\n include INSTALL\n include README.md\n include *-requirements.txt\n\n global-exclude .*.sw[op] *.py[co] __pycache__ .DS_Store .noseids\n \"\"\" % {\n 'templates_dir': templates_dir,\n 'static_dir': static_dir,\n }",
"def load_sounds(l):\r\n if not pygame.mixer.get_init():\r\n pygame.mixer.init()\r\n sounds = [pygame.mixer.Sound(f) for f in l]\r\n return dict(zip(l, sounds))",
"def generateManifest(syn, allFiles, filename):\n keys, data = _extract_file_entity_metadata(syn, allFiles)\n _write_manifest_data(filename, keys, data)",
"def save_all_scripts(genre):\n if os.path.exists('scripts'):\n pass\n else:\n os.mkdir('scripts')\n\n titles = list_titles(genre)\n for title in titles:\n save_script(title)",
"def load_sounds(self, folder):\n pygame.mixer.init()\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n snd = pygame.mixer.Sound(dir + '/' + file)\n self.sounds.append(snd)",
"def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res",
"def dump_pinball_music():\n\texport_sounds(song_labels, os.path.join(conf.path, 'music'), 'Music_')",
"async def generate_audio(self, site, text, payload):\n cache_path = self.config['services']['Pico2wavTtsService'].get(\n 'cache_path', '/tmp/tts_cache')\n value = payload.get('id', 'no_id')\n\n if text:\n short_text = text[0:100].replace(' ', '_').replace(\".\", \"\")\n # speakable and limited\n say_text = text[0:300].replace('(', '').replace(')', '')\n short_file_name = clean_filename('tts-' + str(short_text)) + '.wav'\n file_name = os.path.join(cache_path, short_file_name)\n\n # generate if file doesn't exist in cache\n if not os.path.isfile(file_name):\n path = self.config['services']['Pico2wavTtsService']['binary_path']\n command = path + ' -w=' + file_name + ' \"{}\" '.format(say_text)\n executor = concurrent.futures.ProcessPoolExecutor(\n max_workers=1,\n )\n await self.loop.run_in_executor(executor, os_system, command)\n\n async with aiofiles.open(file_name, mode='rb') as send_file:\n audio_file = await send_file.read()\n await self.client.subscribe('hermod/{}/speaker/finished'.format(site))\n if site in self.clients and self.clients[site].get(\n 'platform', '') == \"web\" and self.clients[site].get('url', False):\n await self.client.publish(\\\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=json.dumps({\n \"url\": self.clients[site].get('url') + \"/tts/\" + short_file_name\n }), qos=0)\n else:\n slice_length = 2048\n\n def chunker(seq, size):\n \"\"\" return chunks\"\"\"\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n for chunk in chunker(audio_file, slice_length):\n await self.client.publish('hermod/{}/speaker/cache/{}'.format(site, value)\\\n , payload=bytes(chunk), qos=0)\n\n # finally send play message with empty payload\n await self.client.publish(\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=None, qos=0)\n\n await self.cleanup_file(short_text, file_name)",
"def pack_content(bsp_file: BSP, path: str, is_peti: bool):\n files = set() # Files to pack.\n soundscripts = set() # Soundscripts need to be added to the manifest too..\n rem_soundscripts = set() # Soundscripts to exclude, so we can override the sounds.\n particles = set()\n additional_files = set() # .vvd files etc which also are needed.\n preload_files = set() # Files we want to force preloading\n\n try:\n pack_list = open(path[:-4] + '.filelist.txt')\n except (IOError, FileNotFoundError):\n pass # Assume no files if missing..\n # There might still be things to inject.\n else:\n with pack_list:\n for line in pack_list:\n line = line.strip().lower()\n if not line or line.startswith('//'):\n continue # Skip blanks or comments\n\n if line[:8] == 'precache':\n preload_files.add(line)\n continue\n\n if line[:2] == '-#':\n rem_soundscripts.add(line[2:])\n continue\n\n if line[:1] == '#':\n line = line[1:]\n soundscripts.add(line)\n\n # We need to add particle systems to a manifest.\n if line.startswith('particles/'):\n particles.add(line)\n\n if line[-4:] == '.mdl':\n additional_files.update({\n line[:-4] + ext\n for ext in\n MDL_ADDITIONAL_EXT\n })\n\n files.add(line)\n\n # Remove guessed files not in the original list.\n additional_files -= files\n\n # Only generate a soundscript for PeTI maps..\n if is_peti:\n music_data = CONF.find_key('MusicScript', [])\n if music_data.value:\n generate_music_script(music_data, files)\n # Add the new script to the manifest file..\n soundscripts.add('scripts/BEE2_generated_music.txt')\n\n # We still generate these in hammer-mode - it's still useful there.\n # If no files are packed, no manifest will be added either.\n gen_sound_manifest(soundscripts, rem_soundscripts)\n gen_part_manifest(particles)\n gen_auto_script(preload_files, is_peti)\n\n inject_names = list(inject_files())\n\n # Abort packing if no packfiles exist, and no injected files exist either.\n if not files and not inject_names:\n LOGGER.info('No files to pack!')\n return\n\n LOGGER.info('Files to pack:')\n for file in sorted(files):\n # \\t seperates the original and in-pack name if used.\n LOGGER.info(' # \"' + file.replace('\\t', '\" as \"') + '\"')\n\n if additional_files and LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.info('Potential additional files:')\n for file in sorted(additional_files):\n LOGGER.debug(' # \"' + file + '\"')\n\n LOGGER.info('Injected files:')\n for _, file in inject_names:\n LOGGER.info(' # \"' + file + '\"')\n\n LOGGER.info(\"Packing Files!\")\n\n # Manipulate the zip entirely in memory\n zip_data = BytesIO()\n zip_data.write(bsp_file.get_lump(BSP_LUMPS.PAKFILE))\n zipfile = ZipFile(zip_data, mode='a')\n LOGGER.debug(' - Existing zip read')\n\n zip_write = get_zip_writer(zipfile)\n\n for file in files:\n pack_file(zip_write, file)\n\n for file in additional_files:\n pack_file(zip_write, file, suppress_error=True)\n\n for filename, arcname in inject_names:\n LOGGER.info('Injecting \"{}\" into packfile.', arcname)\n zip_write(filename, arcname)\n\n LOGGER.debug(' - Added files')\n\n zipfile.close() # Finalise the zip modification\n\n # Copy the zipfile into the BSP file, and adjust the headers\n bsp_file.replace_lump(\n path,\n BSP_LUMPS.PAKFILE,\n zip_data.getvalue(), # Get the binary data we need\n )\n LOGGER.debug(' - BSP written!')\n\n LOGGER.info(\"Packing complete!\")",
"def random_explode_sound(self) -> ba.Sound:\n return self.explode_sounds[random.randrange(len(self.explode_sounds))]",
"def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')",
"def create_sound_map():\n pass",
"def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True",
"def gen_auto_script(preload, is_peti):\n dest = os.path.join('bee2', 'inject', 'auto_run.nut')\n if not preload and not is_peti:\n return # Don't add for hammer maps\n\n with open(dest, 'w') as file:\n if not preload:\n # Leave it empty, don't write an empty function body.\n file.write('//---\\n')\n return\n\n file.write('function Precache() {\\n')\n for entry in preload:\n if entry.startswith('precache_sound:'):\n file.write('\\tself.PrecacheSoundScript(\"{}\");\\n'.format(\n entry[15:],\n ))\n file.write('}\\n')",
"def write_out_scripts(script_dict, path, out_path):\n names = os.path.basename(path)\n file_name = names.lower().replace(\"-\", \"_\") + \".json\"\n path_dir = get_directory(os.path.expanduser(path))\n\n if out_path is not None:\n path_dir = os.path.expanduser(out_path)\n if not os.path.exists(path_dir):\n os.mkdir(path_dir)\n\n write_path = os.path.join(path_dir, file_name)\n\n if not (script_dict and \"resources\" in script_dict):\n print(write_path + \" creation skipped because resources were empty.\")\n return\n if os.path.exists(write_path):\n choice = clean_input(write_path + \" already exists. Overwrite the script? [y/n]\")\n if choice == \"n\":\n print(write_path + \" creation skipped.\")\n return\n try:\n with open_fw(write_path) as output_path:\n sorted_dict = collections.OrderedDict(script_dict.items())\n json_str = json.dumps(sorted_dict, sort_keys=True, indent=4)\n output_path.write(json_str)\n print(\"Successfully wrote scripts to \" + os.path.abspath(write_path))\n output_path.close()\n except Exception as error:\n print(write_path + \" could not be created. {}\".format(error.message))",
"def merge_waves(self):\n dirname = self.dirname\n name = self.get_name()\n videocluster = os.path.join(dirname, name)\n if sys.platform == 'win32':\n videocluster = dirname + '/' + name\n listwaves = os.listdir(videocluster)\n listwaves.sort()\n listw = [os.path.join(videocluster, fil) for fil in listwaves]\n #file_basename = os.path.join(dirname, name)\n if sys.platform == 'win32':\n listw = [videocluster + '/' + fil for fil in listwaves] \n # file_basename = dirname + '/' + name\n self.wave = os.path.join(dirname, name + \".wav\")\n if sys.platform == 'win32':\n self.wave = dirname + '/' + name + \".wav\"\n fm.merge_waves(listw, self.wave)",
"def copy_scripts (self):\n self.mkpath(self.build_dir)\n outfiles = []\n for source, scriptname in self.scripts:\n script = util.convert_path(source)\n # skip empty files\n if not os.path.getsize(script):\n self.warn(\"%s is an empty file (skipping)\" % script)\n continue\n\n if os.name != 'posix' and not scriptname.endswith('.py'):\n # add py extensions on systems, which don't understand\n # shebangs\n scriptname += '.py'\n outfile = os.path.join(self.build_dir, scriptname)\n outfiles.append(outfile)\n\n if not self.force and not dep_util.newer(script, outfile):\n log.debug(\"not copying %s (up-to-date)\", script)\n continue\n\n if not self._adjust_shebang(script, outfile):\n # just copy script, if there was no sheband to adjust\n self.copy_file(script, outfile)",
"def write_manifest(file, args_func):\n\n label = args_func['label']\n split_duration = args_func['split_duration']\n time_length = args_func['time_length']\n\n res = []\n try:\n sr = 16000\n x, _sr = librosa.load(file, sr=sr)\n duration = librosa.get_duration(x, sr=sr)\n\n left = duration\n current_offset = 0\n status = 'single'\n\n while left > 0:\n if left <= split_duration:\n if status == 'single':\n write_duration = left\n current_offset = 0\n else:\n status = 'end'\n write_duration = left + time_length\n current_offset -= time_length\n offset_inc = left\n left = 0\n else:\n if status == 'start' or status == 'next':\n status = 'next'\n else:\n status = 'start'\n\n if status == 'start':\n write_duration = split_duration\n offset_inc = split_duration\n else:\n write_duration = split_duration + time_length\n current_offset -= time_length\n offset_inc = split_duration + time_length\n\n left -= split_duration\n\n metadata = {\n 'audio_filepath': file,\n 'duration': write_duration,\n 'label': label,\n 'text': '_',\n 'offset': current_offset,\n }\n res.append(metadata)\n\n current_offset += offset_inc\n\n except Exception as e:\n err_file = \"error.log\"\n with open(err_file, 'w') as fout:\n fout.write(file + \":\" + str(e))\n\n return res",
"def generate_playlist():\n\n with open(r'C:\\Users\\adria\\OneDrive\\Desktop\\Muzica.txt', 'w+', encoding='utf-8') as playlist:\n playlist_songs = os.listdir('D:\\\\Muzica\\\\')\n for song in playlist_songs:\n playlist.write(song + '\\n')",
"def write_sound(self, current_sound_conf):\n print \"SOUND as parameter: \", current_sound_conf\n try:\n if current_sound_conf:\n current_sound_str = 'ON'\n else:\n current_sound_str = 'OFF'\n self.store.put(SOUND_STORE, sound=current_sound_str)\n except:\n print \"Error: cannot save game sound configuration!\""
] | [
"0.634194",
"0.6167628",
"0.6143036",
"0.574207",
"0.5583267",
"0.5577609",
"0.55683976",
"0.54692596",
"0.53306884",
"0.52429533",
"0.523025",
"0.5224628",
"0.5199384",
"0.5083675",
"0.50342655",
"0.49966714",
"0.49445674",
"0.48723647",
"0.48541355",
"0.48308998",
"0.48279",
"0.48041233",
"0.47337073",
"0.46965456",
"0.46937987",
"0.4688395",
"0.46873212",
"0.4660812",
"0.4654188",
"0.46470025"
] | 0.82363987 | 0 |
Generate a new particle system manifest file. This includes all the current ones defined, plus any custom ones. | def gen_part_manifest(additional):
if not additional:
return # Don't pack, there aren't any new particles..
orig_manifest = os.path.join(
'..',
GAME_FOLDER.get(CONF['game_id', ''], 'portal2'),
'particles',
'particles_manifest.txt',
)
try:
with open(orig_manifest) as f:
props = Property.parse(f, orig_manifest).find_key(
'particles_manifest', [],
)
except FileNotFoundError: # Assume no particles
props = Property('particles_manifest', [])
parts = [prop.value for prop in props.find_all('file')]
for particle in additional:
parts.append(particle)
# Build and unbuild it to strip comments and similar lines.
new_props = Property('particles_manifest', [
Property('file', file)
for file in parts
])
inject_loc = os.path.join('bee2', 'inject', 'particles_manifest.txt')
with open(inject_loc, 'w') as f:
for line in new_props.export():
f.write(line)
LOGGER.info('Written new particles_manifest..') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_manifest(self):\n import time\n import sys\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n hout.write(' '.join(sys.argv) + '\\n')\n for k, v in self.table.items():\n hout.write(';'.join([k] + v) + '\\n')",
"def create_puppet_manifest(self):\n\t\tfilename = '/etc/puppet/manifests/cpanel.d/postunsuspendacct/%s.pp' % self.argv.get('user')\n\t\tfileobj = open(filename, 'w')\t\t\t\t\n\t\tfileobj.write(self.title)\n\t\tfileobj.write(self.puppet_resource)\n\t\tfileobj.close()\n\t\tprint \"[%s] Saved puppet manifest '%s'\" % (ctime(), filename)",
"def generateManifest(syn, allFiles, filename):\n keys, data = _extract_file_entity_metadata(syn, allFiles)\n _write_manifest_data(filename, keys, data)",
"def _create_manifest(self, templates_dir, static_dir):\n return \"\"\"\n graft %(templates_dir)s\n graft %(static_dir)s\n\n include COPYING\n include INSTALL\n include README.md\n include *-requirements.txt\n\n global-exclude .*.sw[op] *.py[co] __pycache__ .DS_Store .noseids\n \"\"\" % {\n 'templates_dir': templates_dir,\n 'static_dir': static_dir,\n }",
"def createParticles(self, type, style, *args):\n if not self.rank:\n logging.info('Creating particles {} with args'.format(type) + (' {}' * len(args)).format(*args))\n\n self.lmp.command('create_atoms {} {}'.format(type, style) + (' {}' * len(args)).format(*args))",
"def gen_sound_manifest(additional, excludes):\n if not additional:\n return # Don't pack, there aren't any new sounds..\n\n orig_manifest = os.path.join(\n '..',\n SOUND_MAN_FOLDER.get(CONF['game_id', ''], 'portal2'),\n 'scripts',\n 'game_sounds_manifest.txt',\n )\n\n try:\n with open(orig_manifest) as f:\n props = Property.parse(f, orig_manifest).find_key(\n 'game_sounds_manifest', [],\n )\n except FileNotFoundError: # Assume no sounds\n props = Property('game_sounds_manifest', [])\n\n scripts = [prop.value for prop in props.find_all('precache_file')]\n\n for script in additional:\n scripts.append(script)\n\n for script in excludes:\n try:\n scripts.remove(script)\n except ValueError:\n LOGGER.warning(\n '\"{}\" should be excluded, but it\\'s'\n ' not in the manifest already!',\n script,\n )\n\n # Build and unbuild it to strip other things out - Valve includes a bogus\n # 'new_sound_scripts_must_go_below_here' entry..\n new_props = Property('game_sounds_manifest', [\n Property('precache_file', file)\n for file in scripts\n ])\n\n inject_loc = os.path.join('bee2', 'inject', 'soundscript_manifest.txt')\n with open(inject_loc, 'w') as f:\n for line in new_props.export():\n f.write(line)\n LOGGER.info('Written new soundscripts_manifest..')",
"def create(self, content, **kwargs):\n with open(self._manifest.path, 'w') as manifest_file:\n base_info = {\n 'version': self._manifest.VERSION,\n 'type': self._manifest.TYPE,\n 'properties': {\n 'name': os.path.basename(content.source_path),\n 'resolution': content.frame_sizes,\n 'length': content.get_size(),\n },\n }\n for key, value in base_info.items():\n json_item = json.dumps({key: value}, separators=(',', ':'))\n manifest_file.write(f'{json_item}\\n')\n\n for item in content:\n json_item = json.dumps({\n 'number': item[0],\n 'pts': item[1],\n 'checksum': item[2]\n }, separators=(',', ':'))\n manifest_file.write(f\"{json_item}\\n\")\n self._manifest.is_created = True",
"def create_manifest():\n dirpath = os.getcwd()\n file_path_ori = dirpath + \"/manifest.json\"\n file_path_new = dirpath + \"/manifests3.json\"\n\n with open(file_path_ori, \"rt\") as fin:\n with open(file_path_new, \"wt\") as fout:\n for line in fin:\n fout.write(line.replace('bucket-name', bucketName))",
"def create(self, content, **kwargs):\n with open(self._manifest.path, 'w') as manifest_file:\n base_info = {\n 'version': self._manifest.VERSION,\n 'type': self._manifest.TYPE,\n }\n for key, value in base_info.items():\n json_item = json.dumps({key: value}, separators=(',', ':'))\n manifest_file.write(f'{json_item}\\n')\n\n for item in content:\n json_item = json.dumps({\n key: value for key, value in item.items()\n }, separators=(',', ':'))\n manifest_file.write(f\"{json_item}\\n\")\n self._manifest.is_created = True",
"def create_file_empty_particles( self, fullpath, iteration,\n time, dt, select_nglobal_dict=None ):\n # Create the file (can be done by one proc or in parallel)\n f = self.open_file( fullpath,\n parallel_open=self.write_metadata_parallel )\n\n # Setup the different layers of the openPMD file\n # (f is None if this processor does not participate is writing data)\n if f is not None:\n\n # Setup the attributes of the top level of the file\n self.setup_openpmd_file( f, iteration, time, dt )\n # Setup the meshes group (contains all the particles)\n f.attrs[\"particlesPath\"] = np.string_(\"particles/\")\n particle_path = \"/data/%d/particles/\" %iteration\n particle_grp = f.require_group(particle_path)\n # Loop through all particle species\n for species_name in sorted(self.species_dict.keys()):\n species = self.species_dict[species_name]\n\n # Check the number of particles to write\n if select_nglobal_dict is not None:\n N = select_nglobal_dict[species_name]\n else:\n N = None\n\n # Create and setup the h5py.Group species_grp\n species_path = particle_path+\"%s/\" %(species_name)\n species_grp = f.require_group( species_path )\n self.setup_openpmd_species_group( species_grp, species, N=N )\n\n # Loop over the different quantities that should be written\n # and setup the corresponding datasets\n for particle_var in self.particle_data:\n\n # Vector quantities\n if particle_var in [\"position\", \"momentum\", \"E\", \"B\"]:\n # Setup the dataset\n quantity_path=species_path+ \"%s/\" %particle_var\n quantity_grp = f.require_group(quantity_path)\n for coord in [\"x\",\"y\",\"z\"]:\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = quantity_grp.create_dataset(\n coord, (N,), dtype='f8')\n else:\n dset = quantity_grp.create_dataset(\n coord, (0,), maxshape=(None,), dtype='f8')\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( quantity_grp,\n particle_var)\n\n # Scalar quantity\n elif particle_var in [\"weighting\", \"id\", \"t\"]:\n # Choose the type of the output\n if particle_var == \"id\":\n dtype = 'uint64'\n else:\n dtype = 'f8'\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = species_grp.create_dataset(\n particle_var, (N,), dtype=dtype )\n else:\n dset = species_grp.create_dataset( particle_var,\n (0,), maxshape=(None,), dtype=dtype)\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( dset, particle_var )\n\n # Unknown field\n else:\n raise ValueError(\n \"Invalid string in particletypes: %s\" %particle_var)\n\n # Close the file\n f.close()",
"def manifest():\n return setup((), _manifest=1)",
"def pack_content(bsp_file: BSP, path: str, is_peti: bool):\n files = set() # Files to pack.\n soundscripts = set() # Soundscripts need to be added to the manifest too..\n rem_soundscripts = set() # Soundscripts to exclude, so we can override the sounds.\n particles = set()\n additional_files = set() # .vvd files etc which also are needed.\n preload_files = set() # Files we want to force preloading\n\n try:\n pack_list = open(path[:-4] + '.filelist.txt')\n except (IOError, FileNotFoundError):\n pass # Assume no files if missing..\n # There might still be things to inject.\n else:\n with pack_list:\n for line in pack_list:\n line = line.strip().lower()\n if not line or line.startswith('//'):\n continue # Skip blanks or comments\n\n if line[:8] == 'precache':\n preload_files.add(line)\n continue\n\n if line[:2] == '-#':\n rem_soundscripts.add(line[2:])\n continue\n\n if line[:1] == '#':\n line = line[1:]\n soundscripts.add(line)\n\n # We need to add particle systems to a manifest.\n if line.startswith('particles/'):\n particles.add(line)\n\n if line[-4:] == '.mdl':\n additional_files.update({\n line[:-4] + ext\n for ext in\n MDL_ADDITIONAL_EXT\n })\n\n files.add(line)\n\n # Remove guessed files not in the original list.\n additional_files -= files\n\n # Only generate a soundscript for PeTI maps..\n if is_peti:\n music_data = CONF.find_key('MusicScript', [])\n if music_data.value:\n generate_music_script(music_data, files)\n # Add the new script to the manifest file..\n soundscripts.add('scripts/BEE2_generated_music.txt')\n\n # We still generate these in hammer-mode - it's still useful there.\n # If no files are packed, no manifest will be added either.\n gen_sound_manifest(soundscripts, rem_soundscripts)\n gen_part_manifest(particles)\n gen_auto_script(preload_files, is_peti)\n\n inject_names = list(inject_files())\n\n # Abort packing if no packfiles exist, and no injected files exist either.\n if not files and not inject_names:\n LOGGER.info('No files to pack!')\n return\n\n LOGGER.info('Files to pack:')\n for file in sorted(files):\n # \\t seperates the original and in-pack name if used.\n LOGGER.info(' # \"' + file.replace('\\t', '\" as \"') + '\"')\n\n if additional_files and LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.info('Potential additional files:')\n for file in sorted(additional_files):\n LOGGER.debug(' # \"' + file + '\"')\n\n LOGGER.info('Injected files:')\n for _, file in inject_names:\n LOGGER.info(' # \"' + file + '\"')\n\n LOGGER.info(\"Packing Files!\")\n\n # Manipulate the zip entirely in memory\n zip_data = BytesIO()\n zip_data.write(bsp_file.get_lump(BSP_LUMPS.PAKFILE))\n zipfile = ZipFile(zip_data, mode='a')\n LOGGER.debug(' - Existing zip read')\n\n zip_write = get_zip_writer(zipfile)\n\n for file in files:\n pack_file(zip_write, file)\n\n for file in additional_files:\n pack_file(zip_write, file, suppress_error=True)\n\n for filename, arcname in inject_names:\n LOGGER.info('Injecting \"{}\" into packfile.', arcname)\n zip_write(filename, arcname)\n\n LOGGER.debug(' - Added files')\n\n zipfile.close() # Finalise the zip modification\n\n # Copy the zipfile into the BSP file, and adjust the headers\n bsp_file.replace_lump(\n path,\n BSP_LUMPS.PAKFILE,\n zip_data.getvalue(), # Get the binary data we need\n )\n LOGGER.debug(' - BSP written!')\n\n LOGGER.info(\"Packing complete!\")",
"def manufacturing_manifest_json(self):\n\n file_name = os.path.join(self.cur_8digit_dir, \"manufacturing.manifest.json\")\n\n with open(file_name) as f_in:\n man_manifest = json.load(f_in)\n\n for component in man_manifest[\"ComponentManufactureList\"]:\n # Get ManufacturingModel (if exists)\n if \"ManufacturingModel\" in component:\n rel_path_from_results = os.path.join(self.cur_8digit_dir, component[\"ManufacturingModel\"])\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results', rel_path_from_results))\n\n # Get STEPModel (if exists)\n if \"STEPModel\" in component:\n rel_path_from_results = os.path.join(self.cur_8digit_dir, component[\"STEPModel\"])\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results', rel_path_from_results))\n\n # Since this is a PrepareForIFAB folder, gather all *.stp files in ./AP203_E2_SEPERATE_PART_FILES/\n path_to_stp_files = os.path.join(self.cur_8digit_dir,'AP203_E2_SEPERATE_PART_FILES')\n filter_for_stp_files = os.path.join(path_to_stp_files,'*.stp')\n stp_files = glob(filter_for_stp_files)\n for stp_file in stp_files:\n self.files_for_export.append(os.path.join('results',stp_file))\n\n # If there's a TDP tag, \n # add the referred file,\n\t# then crawl that file and gather its file references\n if \"TDP\" in man_manifest:\n tdp_json_path = os.path.join(self.cur_8digit_dir,man_manifest[\"TDP\"])\n self.tdp_json(tdp_json_path)\n self.files_for_export.append(os.path.join('results', tdp_json_path))",
"def _buildmanifest(self):\n\n man = self._parents[0].manifest().copy()\n copied = self._repo.dirstate.copies()\n is_exec = util.execfunc(self._repo.root,\n lambda p: man.execf(copied.get(p,p)))\n is_link = util.linkfunc(self._repo.root,\n lambda p: man.linkf(copied.get(p,p)))\n modified, added, removed, deleted, unknown = self._status[:5]\n for i, l in ((\"a\", added), (\"m\", modified), (\"u\", unknown)):\n for f in l:\n man[f] = man.get(copied.get(f, f), nullid) + i\n try:\n man.set(f, is_exec(f), is_link(f))\n except OSError:\n pass\n\n for f in deleted + removed:\n if f in man:\n del man[f]\n\n self._manifest = man",
"def _write_particle_information(\n xml_file, structure, xyz, forcefield, ref_distance, ref_mass, ref_energy\n):\n xml_file.write('<position units=\"sigma\" num=\"{}\">\\n'.format(xyz.shape[0]))\n for pos in xyz:\n xml_file.write(\"{}\\t{}\\t{}\\n\".format(*pos / ref_distance))\n xml_file.write(\"</position>\\n\")\n if forcefield:\n types = [atom.type for atom in structure.atoms]\n else:\n types = [atom.name for atom in structure.atoms]\n\n xml_file.write(\"<type>\\n\")\n for atom_type in types:\n xml_file.write(\"{}\\n\".format(atom_type))\n xml_file.write(\"</type>\\n\")\n\n masses = [atom.mass for atom in structure.atoms]\n xml_file.write(\"<mass>\\n\")\n for mass in masses:\n if mass == 0:\n mass = 1.0\n xml_file.write(\"{}\\n\".format(mass / ref_mass))\n xml_file.write(\"</mass>\\n\")\n\n charges = [atom.charge for atom in structure.atoms]\n xml_file.write(\"<charge>\\n\")\n e0 = 2.396452e-04 # e^2 mol/(kcal A), permittivity of free space\n charge_factor = (4.0 * np.pi * e0 * ref_distance * ref_energy) ** 0.5\n for charge in charges:\n xml_file.write(\"{}\\n\".format(charge / charge_factor))\n xml_file.write(\"</charge>\\n\")\n if forcefield:\n pair_coeffs = list(\n set(\n (atom.type, atom.epsilon, atom.sigma)\n for atom in structure.atoms\n )\n )\n pair_coeffs.sort(key=lambda pair_type: pair_type[0])\n xml_file.write(\"<pair_coeffs>\\n\")\n for param_set in pair_coeffs:\n xml_file.write(\n \"{}\\t{:.4f}\\t{:.4f}\\n\".format(\n param_set[0],\n param_set[1] / ref_energy,\n param_set[2] / ref_distance,\n )\n )\n xml_file.write(\"</pair_coeffs>\\n\")",
"def write_job_manifest(self):\n import time\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n for k, v in self.job.items():\n hout.write(';'.join([k, v]) + '\\n')",
"def generate_custom_metadata(uuid, version, update_proto):\n COMPONENTS = E.components\n COMPONENT = E.component\n ID = E.id\n NAME = E.name\n SUMMARY = E.summary\n PROVIDES = E.provides\n FIRMWARE = E.firmware\n CUSTOM = E.custom\n VALUE = E.value\n RELEASES = E.releases\n RELEASE = E.release\n LOCATION = E.location\n CHECKSUM = E.checksum\n DESCRIPTION = E.description\n P = E.p\n SIZE = E.size\n REQUIRES = E.requires\n\n metadata_tree = COMPONENTS(\n COMPONENT(\n ID(\"com.hacker.firmware\"),\n NAME(\"TotallyNotMalicious\"),\n SUMMARY(\"This is fine\"),\n PROVIDES(\n FIRMWARE(\n uuid,\n type=\"flashed\",\n ),\n ),\n CUSTOM(\n VALUE(\n update_proto,\n key=\"LVFS::UpdateProtocol\",\n ),\n ),\n RELEASES(\n RELEASE(\n LOCATION(\n url_for(\"serve_cab\", _external=True, uuid=uuid, version=version, update_proto=update_proto)\n ),\n DESCRIPTION(\n P(\"Totally not malicious ;)\"),\n ),\n CHECKSUM(\n hashlib.sha1(generate_cab(uuid=uuid, version=version, update_proto=update_proto)).hexdigest(),\n type=\"sha1\", filename=\"poc.cab\", target=\"container\",\n ),\n SIZE(\"1337\", type=\"download\"),\n SIZE(\"0\", type=\"installed\"),\n urgency=\"high\", version=version, timestamp=\"1587399600\", install_duration=\"120\",\n ),\n ),\n REQUIRES(),\n type=\"firmware\",\n ),\n origin=\"lvfs\", version=\"0.9\",\n )\n\n metadata = etree.tostring(metadata_tree, pretty_print=True, xml_declaration=True, encoding=\"UTF-8\")\n\n return gzip.compress(metadata)",
"def produce_manifest(self, file_name):\n\n #file = \"/data/out/tables/\"+str(file_name)+\".manifest\"\n file = file_name+\".manifest\"\n destination_part = file_name.split(\".csv\")[0]\n\n manifest_template = { # \"source\": \"myfile.csv\"\n # ,\"destination\": \"in.c-mybucket.table\"\n \"incremental\": True, \"primary_key\": [\"range\"]\n # ,\"columns\": [\"\"]\n # ,\"delimiter\": \"|\"\n # ,\"enclosure\": \"\"\n }\n if active_restaurant_loop:\n manifest_template[\"primary_key\"] = [\"range\", \"location_id\"]\n #column_header = []\n\n try:\n with open(file, 'w') as file_out:\n json.dump(manifest_template, file_out)\n logging.info(\n \"Output manifest file - {0} - produced.\".format(file))\n except Exception as e:\n logging.error(\"Could not produce output file manifest.\")\n logging.error(e)\n\n return",
"def gen_manifest(stage_dir):\n manifest = {'files': []}\n\n for root, dirs, files in os.walk(stage_dir):\n for file_ in files:\n fullpath = os.path.join(root, file_)\n contents = open(fullpath, 'rb').read()\n sha1 = hashlib.sha1(contents).hexdigest()\n filename = os.path.relpath(fullpath, stage_dir)\n mode = get_permission(fullpath)\n manifest['files'].append({'path': filename, 'sha1': sha1,\n 'mode': mode})\n return manifest",
"def create_particles(self):\n # xf, yf = create_fluid_with_solid_cube()\n xf, yf = create_fluid()\n uf = np.zeros_like(xf)\n vf = np.zeros_like(xf)\n m = initialize_mass(xf, yf)\n rho = initialize_density_fluid(xf, yf)\n h = np.ones_like(xf) * self.hdx * self.dx\n fluid = get_particle_array_wcsph(x=xf, y=yf, h=h, m=m, rho=rho, u=uf,\n v=vf, name=\"fluid\")\n\n xt, yt = create_boundary(self.dx / 2.)\n ut = np.zeros_like(xt)\n vt = np.zeros_like(xt)\n m = np.ones_like(xt) * 1500 * self.dx * self.dx\n rho = np.ones_like(xt) * 1000\n h = np.ones_like(xt) * self.hdx * self.dx / 2.\n tank = get_particle_array_wcsph(x=xt, y=yt, h=h, m=m, rho=rho, u=ut,\n v=vt, name=\"tank\")\n\n return [fluid, tank]",
"def setupParticles(self):\n\n for ss in self.pargs['species']:\n\n # Make sure we are setting up particles, not walls (so we check for id existence)\n if 'id' in ss and 'wall' not in ss:\n if not self.rank:\n logging.info('Setting up particles for group{id}'.format(**ss))\n\n randName = np.random.randint(10**5,10**8)\n pddName = 'pdd' + '{}'.format(np.random.randint(10**5,10**8))\n\n if 'vol_lim' not in ss:\n ss['vol_lim'] = 1e-20\n\n id = ss['id'] - 1\n self.lmp.command('group group{} type {}'.format(id, ss['id']))\n\n if 'args'in ss:\n args = ss['args']\n else:\n args = ()\n\n if 'radius' in ss:\n radius = ss['radius']\n\n if not isinstance(radius, tuple):\n radius = ('constant', radius)\n\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density} radius'.format(**ss) + (' {}' * len(radius)).format(*radius) \\\n + (' {}' * len(args)).format(*args))\n else:\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density}'.format(**ss) + (' {}' * len(args)).format(*args))\n \n self.lmp.command('fix {} '.format(pddName) + 'group{}'.format(id) + ' particledistribution/discrete 67867967 1'.format(**ss) + ' {} 1.0'.format(randName))\n\n if ss['style'] is 'multisphere':\n itype = ss['style']\n else:\n itype = 'nve/{style}'.format(**ss)\n\n #Do NOT unfix randName! Will cause a memory corruption error\n self.pddName.append(pddName)",
"def create(self, content=None, _tqdm=None):\n with open(self._manifest.path, 'w') as manifest_file:\n self._write_base_information(manifest_file)\n obj = content if content else self._reader\n self._write_core_part(manifest_file, obj, _tqdm)\n\n self.set_index()",
"def write_manifest ( self, **manifest_kw ):\n for package in self._subdirs.values():\n package.write_manifest ( **manifest_kw )",
"def generate_manifest_dict(self):\n\n annotations = dict()\n\n for build_project in self.projects.get('build', []):\n for annotation in build_project.get('annotation', []):\n annotations[annotation['name']] = annotation['value']\n\n product = annotations.get('PRODUCT', 'unknown')\n version = annotations.get('VERSION', 'unknown')\n bld_num = annotations.get('BLD_NUM', '9999')\n manifest_name = '{}-{}-{}'.format(product, version, bld_num)\n\n return {\n manifest_name: {\n 'remotes': self.remotes,\n 'defaults': self.defaults,\n 'projects': self.projects\n }\n }",
"def manifest(ctx, config): # use as `schematic manifest ...`\n try:\n logger.debug(f\"Loading config file contents in '{config}'\")\n ctx.obj = CONFIG.load_config(config)\n except ValueError as e:\n logger.error(\"'--config' not provided or environment variable not set.\")\n logger.exception(e)\n sys.exit(1)",
"def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)",
"def particle_to_yml(self, particles, filename, mode='w'):\n # open write append, if you want to start from scratch manually delete this fid\n fid = open(os.path.join(RESOURCE_PATH, filename), mode)\n\n fid.write('header:\\n')\n fid.write(\" particle_object: 'MULTIPLE'\\n\")\n fid.write(\" particle_type: 'MULTIPLE'\\n\")\n fid.write('data:\\n')\n\n for i in range(0, len(particles)):\n particle_dict = particles[i].generate_dict()\n\n fid.write(' - _index: %d\\n' % (i+1))\n\n fid.write(' particle_object: %s\\n' % particles[i].__class__.__name__)\n fid.write(' particle_type: %s\\n' % particle_dict.get('stream_name'))\n\n fid.write(' internal_timestamp: %16.3f\\n' %\n particle_dict.get('internal_timestamp'))\n\n for val in particle_dict.get('values'):\n if isinstance(val.get('value'), float):\n if val.get('value_id') is \"time_of_sample\":\n fid.write(' %s: %16.5f\\n' % (val.get('value_id'), val.get('value')))\n else:\n fid.write(' %s: %16.3f\\n' % (val.get('value_id'), val.get('value')))\n elif isinstance(val.get('value'), str):\n fid.write(\" %s: '%s'\\n\" % (val.get('value_id'), val.get('value')))\n else:\n fid.write(' %s: %s\\n' % (val.get('value_id'), val.get('value')))\n fid.close()",
"def _generate_manifest(self, input_data_path_list):\n # Given a list of S3 buckets, generate a manifest file\n # [\n # {\"prefix\": \"s3://customer_bucket/some/prefix/\"},\n # \"relative/path/to/data-1\",\n # \"relative/path/data-2\",\n # ...\n # ]\n manifest = []\n shared_prefix, key_path_list = self._get_prefix_and_relative_path(input_data_path_list)\n logger.info(f\"Generating manifest file with shared prefix '{shared_prefix}/' ...\")\n manifest.append({\"prefix\": shared_prefix + \"/\"})\n for relative_key_path in key_path_list:\n manifest.append(relative_key_path)\n\n manifest_file_path = self._write_manifest_to_s3(manifest_file=manifest)\n return manifest_file_path",
"def generate_and_update_manifest(idx_depth_dir, manifest, new_manifest_path):\n for element in manifest:\n sample_idx_path = os.path.join(\n idx_depth_dir, element.name + \".idxdepth\")\n if not os.path.isfile(sample_idx_path):\n message = \"Missing idxdepth result at sample: \" + sample_idx_path\n raise Exception(message)\n with open(sample_idx_path, 'r') as f:\n js = json.load(f)\n try:\n depth = js[\"autosome\"][\"depth\"]\n read_len = js[\"read_length\"]\n except KeyError:\n message = \"Missing required fields in idxdepth output at sample: \" + element.sample\n raise Exception(message)\n element.depth = depth\n element.read_len = read_len\n f.close()\n with open(new_manifest_path, 'w') as f:\n f.write(\"#ID\\tPath\\tDepth\\tRead_len\\n\")\n for element in manifest:\n f.write(element.to_string() + \"\\n\")\n f.close()",
"def generate_structure(flag):\r\n if os.path.isfile(\"packing.nfo\"):\r\n os.remove(os.path.abspath(\"packing.nfo\"))\r\n proc = subprocess.Popen(['PackingGeneration.exe', flag])\r\n proc.wait()\r\n if not os.path.isfile(\"packing.nfo\"):\r\n print('Try to change number of particles or size distribution.')\r\n raise Exception('Packing algorithm failed.')"
] | [
"0.62623477",
"0.61897093",
"0.61677384",
"0.60479397",
"0.58180106",
"0.5817169",
"0.5700848",
"0.56880516",
"0.5666152",
"0.56610906",
"0.55579436",
"0.554421",
"0.5497124",
"0.548652",
"0.5405297",
"0.5399095",
"0.538896",
"0.5375458",
"0.5372112",
"0.5355626",
"0.5350976",
"0.5342353",
"0.5340264",
"0.53291714",
"0.5323168",
"0.529049",
"0.52474064",
"0.52121586",
"0.52000785",
"0.5194838"
] | 0.7731379 | 0 |
Generate a soundscript file for music. | def generate_music_script(data: Property, pack_list):
# We also pack the filenames used for the tracks - that way funnel etc
    # only get packed when needed. Stock sounds are in VPKs or in aperturetag/,
    # so we don't check there.
# The voice attrs used in the map - we can skip tracks
voice_attr = CONF['VoiceAttr', ''].casefold().split(';')
funnel = data.find_key('tbeam', '')
bounce = data.find_key('bouncegel', '')
speed = data.find_key('speedgel', '')
# The sounds must be present, and the items should be in the map.
has_funnel = funnel.value and (
'funnel' in voice_attr or
'excursionfunnel' in voice_attr
)
has_bounce = bounce.value and (
'bouncegel' in voice_attr or
'bluegel' in voice_attr
)
# Speed-gel sounds also play when flinging, so keep it always.
with open(os.path.join('bee2', 'inject', 'music_script.txt'), 'w') as file:
# Write the base music track
file.write(MUSIC_START.format(name='', vol='1'))
write_sound(file, data.find_key('base'), pack_list, snd_prefix='#*')
file.write(MUSIC_BASE)
# The 'soundoperators' section is still open now.
        # Add the operators to play the auxiliary sounds..
if has_funnel:
file.write(MUSIC_FUNNEL_MAIN)
if has_bounce:
file.write(MUSIC_GEL_BOUNCE_MAIN)
if speed.value:
file.write(MUSIC_GEL_SPEED_MAIN)
# End the main sound block
file.write(MUSIC_END)
if has_funnel:
# Write the 'music.BEE2_funnel' sound entry
file.write('\n')
file.write(MUSIC_START.format(name='_funnel', vol='1'))
write_sound(file, funnel, pack_list, snd_prefix='*')
# Some tracks want the funnel music to sync with the normal
# track, others randomly choose a start.
file.write(
MUSIC_FUNNEL_SYNC_STACK
if data.bool('sync_funnel') else
MUSIC_FUNNEL_RAND_STACK
)
file.write(MUSIC_FUNNEL_UPDATE_STACK)
if has_bounce:
file.write('\n')
file.write(MUSIC_START.format(name='_gel_bounce', vol='0.5'))
write_sound(file, bounce, pack_list, snd_prefix='*')
# Fade in fast (we never get false positives), but fade out slow
# since this disables when falling back.
file.write(MUSIC_GEL_STACK.format(fadein=0.25, fadeout=1.5))
if speed.value:
file.write('\n')
file.write(MUSIC_START.format(name='_gel_speed', vol='0.5'))
write_sound(file, speed, pack_list, snd_prefix='*')
# We need to shut off the sound fast, so portals don't confuse it.
# Fade in slow so it doesn't make much sound (and also as we get
# up to speed). We stop almost immediately on gel too.
file.write(MUSIC_GEL_STACK.format(fadein=0.5, fadeout=0.1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res",
"def make_a_sound(): # document string\n print('quack')",
"def write_sound(file, snds: Property, pack_list, snd_prefix='*'):\n if snds.has_children():\n file.write('\"rndwave\"\\n\\t{\\n')\n for snd in snds:\n file.write(\n '\\t\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snd.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snd.value.casefold())\n file.write('\\t}\\n')\n else:\n file.write(\n '\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snds.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snds.value.casefold())",
"def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')",
"def produce_music_start(self) -> str:\n try:\n self.folder_create(self.folder_config)\n value_path = os.path.join(self.folder_config, name_sound)\n if not (os.path.exists(value_path) and os.path.isfile(value_path)):\n audio_get = pafy.new(url=url_sound)\n best_audio = audio_get.getbestaudio()\n best_audio.download(filepath=value_path)\n return value_path\n except Exception as e:\n a = TelegramManager()\n a.proceed_message_values(f'We faced problem with the getting audio. Mistake: {e}')\n return ''",
"def create_audio_file():\n # Get the response from boto3\n raw_audio = generate_audio()\n # pull the Audiostream object from the response from boto3\n raw_audio = raw_audio[\"AudioStream\"]\n # create output location\n # process the whole block\n with closing(raw_audio) as audio:\n with open(\"output_audio.mp3\", \"wb\") as file:\n file.write(raw_audio.read())",
"def dump_pinball_music():\n\texport_sounds(song_labels, os.path.join(conf.path, 'music'), 'Music_')",
"def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')",
"def main(beatmap_sounds, effect_volume, music, music_volume, skin, input, output):\n output_format = os.path.splitext(output)[1][1:]\n\n bm_audios = load_sounds(beatmap_sounds) if beatmap_sounds else {}\n skin_audios = load_sounds(skin) if skin else {}\n\n beatmap = Beatmap.from_path(input)\n track = Track.from_beatmap(beatmap, bm_audios, skin_audios)\n beatmap_audio = track.compile()\n beatmap_audio = audioseg_adjust_volume(beatmap_audio, effect_volume)\n\n result = beatmap_audio\n\n if music:\n music_audio = AudioSegment.from_file(music)\n music_audio = audioseg_adjust_volume(music_audio, music_volume)\n\n result = music_audio.overlay(AudioSegment.silent(24) + result)\n\n result.export(output, output_format)\n\n return 0",
"async def generate_audio(self, site, text, payload):\n cache_path = self.config['services']['Pico2wavTtsService'].get(\n 'cache_path', '/tmp/tts_cache')\n value = payload.get('id', 'no_id')\n\n if text:\n short_text = text[0:100].replace(' ', '_').replace(\".\", \"\")\n # speakable and limited\n say_text = text[0:300].replace('(', '').replace(')', '')\n short_file_name = clean_filename('tts-' + str(short_text)) + '.wav'\n file_name = os.path.join(cache_path, short_file_name)\n\n # generate if file doesn't exist in cache\n if not os.path.isfile(file_name):\n path = self.config['services']['Pico2wavTtsService']['binary_path']\n command = path + ' -w=' + file_name + ' \"{}\" '.format(say_text)\n executor = concurrent.futures.ProcessPoolExecutor(\n max_workers=1,\n )\n await self.loop.run_in_executor(executor, os_system, command)\n\n async with aiofiles.open(file_name, mode='rb') as send_file:\n audio_file = await send_file.read()\n await self.client.subscribe('hermod/{}/speaker/finished'.format(site))\n if site in self.clients and self.clients[site].get(\n 'platform', '') == \"web\" and self.clients[site].get('url', False):\n await self.client.publish(\\\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=json.dumps({\n \"url\": self.clients[site].get('url') + \"/tts/\" + short_file_name\n }), qos=0)\n else:\n slice_length = 2048\n\n def chunker(seq, size):\n \"\"\" return chunks\"\"\"\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n for chunk in chunker(audio_file, slice_length):\n await self.client.publish('hermod/{}/speaker/cache/{}'.format(site, value)\\\n , payload=bytes(chunk), qos=0)\n\n # finally send play message with empty payload\n await self.client.publish(\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=None, qos=0)\n\n await self.cleanup_file(short_text, file_name)",
"def make_wav(text, speed=1.0, emotion='normal', output_file='__temp.wav', output_dir=os.getcwd()):\n open_jtalk = [OPENJTALK_BINPATH + '/open_jtalk']\n mech = ['-x', OPENJTALK_DICPATH]\n htsvoice = ['-m', OPENJTALK_VOICEPATH.format(emotion=emotion)]\n speed = ['-r', str(speed)]\n outwav = ['-ow', os.path.join(output_dir, output_file)]\n cmd = open_jtalk + mech + htsvoice + speed + outwav\n c = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n c.stdin.write(text.encode('utf-8'))\n c.stdin.close()\n c.wait()\n return os.path.join(output_dir, output_file)",
"def make_music_rand():\n pass",
"def create_mp3():\n\n #TODO: les roles ne devraient pas etre en dur\n list_all_roles = [\n [],\n [\"morgan\"],\n [\"oberon\"],\n [\"mordred\"],\n [\"morgan\", \"oberon\"],\n [\"morgan\", \"mordred\"],\n [\"oberon\", \"mordred\"],\n [\"morgan\", \"oberon\", \"mordred\"]\n ]\n\n for list_roles in list_all_roles:\n\n list_mp3 = [\"init.mp3\", \"serv_mord.mp3\"]\n if \"oberon\" in list_roles:\n list_mp3.append(\"oberon.mp3\")\n list_mp3.append(\"red_identi.mp3\")\n\n if \"morgan\" in list_roles:\n list_mp3.append(\"add_per_mor.mp3\")\n\n list_mp3.append(\"serv_mord.mp3\")\n if \"mordred\" in list_roles:\n list_mp3.append(\"mordred.mp3\")\n list_mp3.extend([\"merlin_identi.mp3\", \"end.mp3\"])\n\n mp3_combined = AudioSegment.empty()\n for mp3 in list_mp3:\n mp3_combined += AudioSegment.from_mp3(\"resources/{}\".format(mp3))\n\n mp3_combined.export(\"resources/_{}.mp3\".format('-'.join(sorted(list_roles))), format=\"mp3\")",
"def gen_sound_manifest(additional, excludes):\n if not additional:\n return # Don't pack, there aren't any new sounds..\n\n orig_manifest = os.path.join(\n '..',\n SOUND_MAN_FOLDER.get(CONF['game_id', ''], 'portal2'),\n 'scripts',\n 'game_sounds_manifest.txt',\n )\n\n try:\n with open(orig_manifest) as f:\n props = Property.parse(f, orig_manifest).find_key(\n 'game_sounds_manifest', [],\n )\n except FileNotFoundError: # Assume no sounds\n props = Property('game_sounds_manifest', [])\n\n scripts = [prop.value for prop in props.find_all('precache_file')]\n\n for script in additional:\n scripts.append(script)\n\n for script in excludes:\n try:\n scripts.remove(script)\n except ValueError:\n LOGGER.warning(\n '\"{}\" should be excluded, but it\\'s'\n ' not in the manifest already!',\n script,\n )\n\n # Build and unbuild it to strip other things out - Valve includes a bogus\n # 'new_sound_scripts_must_go_below_here' entry..\n new_props = Property('game_sounds_manifest', [\n Property('precache_file', file)\n for file in scripts\n ])\n\n inject_loc = os.path.join('bee2', 'inject', 'soundscript_manifest.txt')\n with open(inject_loc, 'w') as f:\n for line in new_props.export():\n f.write(line)\n LOGGER.info('Written new soundscripts_manifest..')",
"def output_sound():\n try:\n subprocess.call(['ffplay', '-nodisp', '-autoexit', SOUND_FILE])\n except:\n pass",
"def main():\n if (len(sys.argv) == 1):\n song = (\n ('c', 4), ('c*', 4), ('eb', 4),\n ('g#', 4), ('g*', 2), ('g5', 4),\n ('g5*', 4), ('r', 4), ('e5', 16),\n ('f5', 16), ('e5', 16), ('d5', 16),\n ('e5*', 4)\n )\n song = (\n ('a3',4), ('b3',4),('c4',4),('d4',4)\n )\n outputSongFile = \"testSong.wav\"\n timing = 4\n\n createMelody(song, outputSongFile, timing)\n playMelody(outputSongFile)\n\n else:\n song = str(sys.argv[1])\n outputSongFile = str(sys.argv[2])\n createMelody(song, outputSongFile, timing)",
"def setup_audio(self):\n\t\t\n\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/setup_audio.sh'\n\t\tsubprocess.call([path_to_file])",
"def create_audiobook():\n\n f = open(\"static/files/book.txt\", \"r\", encoding=\"utf-8\")\n summary = f.read()\n print('total chars: ', len(summary))\n all_words = summary.split('.')\n aflr.api_key = \"b6b1434676d14bdfbf9f50ca2157ed5c\"\n VOICE=\"Matthew\"\n current, total_chars, chunk_num, TEXT = 0,0,0,[]\n while current < len(all_words) - 1:\n while total_chars <= 4999:\n TEXT.append(all_words[current])\n total_chars += len(all_words[current]) + 1\n current += 1\n if current == len(all_words):\n break\n \n if current < len(all_words):\n TEXT.pop()\n current -= 1\n total_chars = 0\n\n TEXT = \".\".join(TEXT)\n\n SPEED=80\n script = aflr.Script().create(\n scriptText=TEXT,\n projectName=\"may_the_4th\",\n moduleName=\"evil\",\n scriptName=f\"{chunk_num}_evil_{VOICE}\",\n )\n print(f\"Connect to the dev star: \\n {script} \\n\")\n\n scriptId = script[\"scriptId\"]\n\n response = aflr.Speech().create(\n scriptId=scriptId, voice=VOICE, speed=SPEED, #effect=EFFECT\n )\n # print(f\"Response from dev star: \\n {response} \\n\")\n # mastering current\n response = aflr.Mastering().create(\n scriptId=scriptId, #backgroundTrackId=BACKGROUNDTRACK\n )\n # print(f\"Using the force: \\n {response} \\n\")\n\n url = aflr.Mastering().retrieve(scriptId=scriptId)\n #print(f\"url to download the track: \\n {url} \\n\")\n\n # or download\n file = aflr.Mastering().download(\n scriptId=scriptId, destination=MINI_PATH\n )\n # print(f\"Listen to the results of the force: \\n {file} \\n\")\n\n print(\"finished\",chunk_num)\n\n TEXT = []\n chunk_num += 1\n\n play_audio()",
"def main():\n #follow_line()\n #data, samplerate = sf.read('youtube_8660.wav')\n #sf.write('new_file.ogg', data, samplerate)\n beep_for_color()",
"def save(filename_audio, filename_jam, jam, strict=True, fmt=\"auto\", **kwargs):\n\n y = jam.sandbox.muda._audio[\"y\"]\n sr = jam.sandbox.muda._audio[\"sr\"]\n\n # First, dump the audio file\n psf.write(filename_audio, y, sr, **kwargs)\n\n # Then dump the jam\n jam.save(filename_jam, strict=strict, fmt=fmt)",
"def export_sounds(names, path, base_label='Sound_'):\n\tfor filename, output in dump_sounds(names, base_label):\n\t\twith open(os.path.join(path, filename), 'w') as out:\n\t\t\tout.write(output)",
"def playstim(self,stim):\n\n # Make the wave file\n\n # Play it using an external player\n if platform.system()==\"Linux\":\n\n # Generate the wave file for this stimulus\n self.make_hyde_peretz_wav(stim,'stim.wav')\n \n pygame.mixer.music.load(\"stim.wav\")\n pygame.mixer.music.play()\n pygame.time.wait(2000)\n\n \"\"\"\n # Make the stimulus (this is just concatenating)\n vals = self.generate_hyde_peretz(stim)\n\n # open stream\n stream = self.p.open(format = self.SAMPLEWIDTH,\n channels = self.NCHANNELS,\n rate = self.SAMPLEFREQ,\n output = True)\n\n stream.write(vals)\n stream.close()\n \"\"\"\n\n\n\n elif os.name==\"posix\": # That means we are in Mac OS\n\n # Generate a wave file\n self.make_hyde_peretz_wav(stim,'stim.wav')\n\n # And play it using the external player\n call([\"afplay\", \"stim.wav\"]) # use in MacOS",
"def generateNotes():\r\n fs = 44100 # hertz\r\n seconds = 3 # Note duration of 3 seconds\r\n noteNames = [\"C4\", \"D4\", \"E4\", \"F4\", \"G4\", \"A4\", \"B4\"]\r\n for noteName in noteNames:\r\n myNote = music21.note.Note(noteName)\r\n noteFrequency = myNote.pitch.frequency\r\n # Generate array with seconds*sample_rate steps, ranging between 0 and seconds\r\n t = np.linspace(0, seconds, seconds * fs, False)\r\n\r\n # Generate a 440 Hz sine wave\r\n sound = np.sin(noteFrequency * t * 2 * np.pi)\r\n\r\n # Ensure that highest value is in 16-bit range\r\n audio = sound * (2**15 - 1) / np.max(np.abs(sound))\r\n # Convert to 16-bit data\r\n audio = audio.astype(np.int16)\r\n\r\n # Start playback\r\n play_obj = sa.play_buffer(audio, 1, 2, fs)\r\n\r\n # Wait for playback to finish before exiting\r\n play_obj.wait_done()\r\n\r\n #Write sound to file\r\n sf.write('assets/patterns/'+noteName+'.wav', audio, fs)",
"def synthesize_ssml_file(ssml_file):\n from google.cloud import texttospeech\n client = texttospeech.TextToSpeechClient()\n\n with open(ssml_file, 'r') as f:\n ssml = f.read()\n input_text = texttospeech.types.SynthesisInput(ssml=ssml)\n\n # Note: the voice can also be specified by name.\n # Names of voices can be retrieved with client.list_voices().\n voice = texttospeech.types.VoiceSelectionParams(\n language_code='en-AU',\n name='en-AU-Wavenet-D',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3,\n speaking_rate=0.80)\n\n response = client.synthesize_speech(input_text, voice, audio_config)\n\n # The response's audio_content is binary.\n filename = ssml_file\n try:\n filename = filename.replace('.txt', '.mp3')\n filename = filename.replace('../Articles/', '')\n filename = filename.replace(';', ' ')\n filename = filename.replace(\"'\", \" \")\n except Exception as e:\n print(e)\n print('Check replace command in synthesize_file.py file')\n\n with open(filename, 'wb') as out:\n out.write(response.audio_content)\n print(f'Audio content written to file: \\n{filename}\\n')",
"def main():\n\n # Parse arguments\n parser = OptionParser()\n parser.add_option('-n', '--subscription_key', dest='subscription_key',\n help='subscription_key for authentication')\n parser.add_option('-t', '--text', dest='text',\n help='text to synthesize')\n parser.add_option('-l', '--language', dest='language',\n help='language')\n parser.add_option('-g', '--gender', dest='gender',\n help='gender')\n parser.add_option('-d', '--directory', dest='directory',\n help='directory to store the file')\n (options, args) = parser.parse_args()\n subscription_key = options.subscription_key\n text = options.text\n language = options.language\n gender = options.gender\n directory = options.directory\n\n # Perform sanity checks on options\n validate_options(subscription_key, text)\n\n if not directory:\n directory = default_directory\n\n if not language:\n language = default_language\n\n if not gender:\n gender = default_gender\n\n # format = 'riff-16khz-16bit-mono-pcm'\n format = 'riff-8khz-8bit-mono-mulaw'\n\n # lang = 'en-AU'\n # gender = 'Female'\n tts_msspeak = MSSpeak(subscription_key, '/tmp/')\n tts_msspeak.set_cache(False)\n output_filename = tts_msspeak.speak(text, language, gender, format)\n\n print 'Recorded TTS to %s%s' % (directory, output_filename)",
"def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)",
"def write_music(self, length, bpm=75, initial_note=\"C\",\n initial_rhythm=\"0.25\", initial_key=\"F\"):\n # Translate the string key into an index for our note generator\n key_number = self.FREQUENCY_LIST.index(initial_key)\n print key_number\n\n rhythms = self.generate_rhythms(\n self.learner.ngrams[\"rhythms\"], initial_rhythm, bpm, length)\n notes = self.generate_notes(\n self.learner.ngrams[\"notes\"], initial_note, len(rhythms),\n key_number)\n\n rhythms = map(lambda x: float(60) * x / bpm, rhythms)\n notes = map(lambda x: self.FREQUENCY_MAP[x], notes)\n\n for i, n in enumerate(notes):\n self.write_note(self.make_note_sound(notes[i], rhythms[i]))",
"def playSound():\n\tif os.name == \"posix\":\n\t\tduration = 0.5 # seconds\n\t\tfreq = 80 # Hz\n\t\t#os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n\telif os.name == \"nt\":\n\t\tduration = 500 # milliseconds\n\t\tfreq = 80 # Hz\n\t\t#winsound.Beep(freq, duration)",
"def synth_midi(midi_path, output_path, sampling_rate=44100, sf2_path=SOUNDFONT_PATH):\n midi = pretty_midi.PrettyMIDI(midi_path)\n raw_wav = midi.fluidsynth(fs=sampling_rate, sf2_path=sf2_path)\n wave.write(output_path, sampling_rate, raw_wav)",
"def create_sound_map():\n pass"
] | [
"0.69354683",
"0.69074094",
"0.6694641",
"0.6556796",
"0.6548967",
"0.6373594",
"0.6314922",
"0.62685734",
"0.62347925",
"0.61781305",
"0.614819",
"0.6109333",
"0.61045724",
"0.60798234",
"0.6031474",
"0.60156626",
"0.5977319",
"0.5961787",
"0.5923874",
"0.5900185",
"0.5889323",
"0.58873755",
"0.58769405",
"0.587482",
"0.58687025",
"0.5858703",
"0.5845527",
"0.584328",
"0.58155686",
"0.58121246"
] | 0.7483942 | 0 |
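Editor's note: the generate_music_script document in the row above builds a Valve soundscript by writing template blocks around per-track "wave" entries. Below is a minimal, self-contained sketch of that pattern; the MUSIC_START/MUSIC_END strings here are simplified stand-ins invented for the illustration, not the constants the code above actually uses.

import io

# Simplified stand-ins for the MUSIC_* template constants (assumption for this sketch).
MUSIC_START = '"music.BEE2{name}"\n\t{{\n\t"channel" "CHAN_STATIC"\n\t"volume" "{vol}"\n'
MUSIC_END = '\t}\n'


def build_music_entry(tracks, vol='1'):
    """Return a soundscript entry containing a rndwave block for the given tracks."""
    buf = io.StringIO()
    buf.write(MUSIC_START.format(name='', vol=vol))
    buf.write('\t"rndwave"\n\t\t{\n')
    for track in tracks:
        # '#*' matches the snd_prefix used for the base music track above.
        buf.write('\t\t"wave" "#*{}"\n'.format(track))
    buf.write('\t\t}\n')
    buf.write(MUSIC_END)
    return buf.getvalue()


print(build_music_entry(['music/portal2_background01.wav']))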
Write either a single sound, or multiple rndsound. snd_prefix is the prefix for each filename - *, #, @, etc. | def write_sound(file, snds: Property, pack_list, snd_prefix='*'):
if snds.has_children():
file.write('"rndwave"\n\t{\n')
for snd in snds:
file.write(
'\t"wave" "{sndchar}{file}"\n'.format(
file=snd.value.lstrip(SOUND_CHARS),
sndchar=snd_prefix,
)
)
pack_list.add('sound/' + snd.value.casefold())
file.write('\t}\n')
else:
file.write(
'"wave" "{sndchar}{file}"\n'.format(
file=snds.value.lstrip(SOUND_CHARS),
sndchar=snd_prefix,
)
)
pack_list.add('sound/' + snds.value.casefold()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_sounds(names, path, base_label='Sound_'):\n\tfor filename, output in dump_sounds(names, base_label):\n\t\twith open(os.path.join(path, filename), 'w') as out:\n\t\t\tout.write(output)",
"def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)",
"def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')",
"def setSound(self):\r\n self._shipsound = Sound('pew1.wav')\r\n self._aliensound = Sound('pew2.wav')\r\n self._shipexplode = Sound('blast1.wav')\r\n self._alienexplode = Sound('pop1.wav')",
"def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()",
"def generateNotes():\r\n fs = 44100 # hertz\r\n seconds = 3 # Note duration of 3 seconds\r\n noteNames = [\"C4\", \"D4\", \"E4\", \"F4\", \"G4\", \"A4\", \"B4\"]\r\n for noteName in noteNames:\r\n myNote = music21.note.Note(noteName)\r\n noteFrequency = myNote.pitch.frequency\r\n # Generate array with seconds*sample_rate steps, ranging between 0 and seconds\r\n t = np.linspace(0, seconds, seconds * fs, False)\r\n\r\n # Generate a 440 Hz sine wave\r\n sound = np.sin(noteFrequency * t * 2 * np.pi)\r\n\r\n # Ensure that highest value is in 16-bit range\r\n audio = sound * (2**15 - 1) / np.max(np.abs(sound))\r\n # Convert to 16-bit data\r\n audio = audio.astype(np.int16)\r\n\r\n # Start playback\r\n play_obj = sa.play_buffer(audio, 1, 2, fs)\r\n\r\n # Wait for playback to finish before exiting\r\n play_obj.wait_done()\r\n\r\n #Write sound to file\r\n sf.write('assets/patterns/'+noteName+'.wav', audio, fs)",
"def sound_effects(sound):\n global effect # Making effect global so it can be used outside this function\n effect = pygame.mixer.Sound(sound) # Loading sound files\n effect.play(0) # Playing sound files",
"def dump_sounds(names, base_label='Sound_'):\n\tpointer_length = 2\n\tpointer_address = 0x4ca2\n\t# sfx: pointer_address = 0x63ce\n\n\taddresses = []\n\tfor i, name in enumerate(names):\n\t\tsong_index, bank = get_song_bank(i)\n\t\taddress = read_address_pointer(\n\t\t\t(bank - 1) * 0x4000 +\n\t\t\tpointer_address +\n\t\t\tsong_index * pointer_length\n\t\t)\n\t\taddresses += [address]\n\n\t# Do an extra pass to grab labels from each song.\n\t# There's no getting around this since the\n\t# Graveyard themes share labels.\n\n\tsounds = {}\n\tall_labels = []\n\tfor name, address in zip(names, addresses):\n\t\tsound = Sound(address, base_label + name)\n\t\tsounds[name] = sound\n\t\tall_labels += sound.labels\n\n\toutputs = []\n\tfor name, address in zip(names, addresses):\n\t\tsound = sounds[name]\n\t\toutput = sound.to_asm(all_labels) + '\\n'\n\t\tfilename = name.lower() + '.asm'\n\t\toutputs += [(filename, output)]\n\n\treturn outputs",
"def crate_tone(frequency):\n sample_rate, size, _channels = get_init()\n sample_num = round(sample_rate / frequency)\n amplitude = 2 ** (abs(size) - 1) - 1\n sampler = ((amplitude if i < sample_num // 2 else -amplitude) for i in range(sample_num))\n samples = array('h', sampler)\n return pygame.mixer.Sound(samples)",
"def handle_sound_ext(sid):\n Popen([\"afplay\", ds.sound_map[int(sid)]])\n\n # red = 1, green = 2, yellow = 3, blue = 4\n # intensity from 0 to 3 lights activated\n serial.write(\"{}{}\\n\".format(sid, 3).encode('ascii'))",
"def playSound():\n\tif os.name == \"posix\":\n\t\tduration = 0.5 # seconds\n\t\tfreq = 80 # Hz\n\t\t#os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n\telif os.name == \"nt\":\n\t\tduration = 500 # milliseconds\n\t\tfreq = 80 # Hz\n\t\t#winsound.Beep(freq, duration)",
"def make_a_sound(): # document string\n print('quack')",
"def phraseSound(self, toks):\n\t\tdef head(l):\n\t\t\treturn l[0] if l else None\n\t\ts = [head(self.word.get(t,[''])) for t in toks]\n\t\t#print('phraseSound(',toks,')=',s)\n\t\tif not all(s):\n\t\t\treturn []\n\t\t# nuke numbers, join into one string\n\t\tt = ' '.join([re.sub('\\d+', '', x) for x in s])\n\t\t# nuke consecutive duplicate sounds\n\t\tu = re.sub('(\\S+) \\\\1 ', '\\\\1 ', t)\n\t\tv = u.split()\n\t\t#print('phraseSound2=',v)\n\t\treturn v",
"def add_sounds(self) -> None:\n self.sounds.append(arcade.Sound(\"sounds/minecraft-theme.mp3\"))\n self.sounds.append(arcade.Sound(\"sounds/starcraft-theme.mp3\"))\n self.sounds.append(arcade.Sound(\"sounds/player_attack.mp3\"))",
"def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res",
"def output_sound():\n try:\n subprocess.call(['ffplay', '-nodisp', '-autoexit', SOUND_FILE])\n except:\n pass",
"def write_wav(self, full_out_file = None):\n\n if full_out_file is None:\n \n (out_file, out_dir) = misc.save_file(FilterSpec='*.wav', DialogTitle='Write sound to ...', \n DefaultName='')\n full_out_file = os.path.join(out_dir, out_file)\n if full_out_file is None:\n print('Output discarded.')\n return 0\n else:\n full_out_file = os.path.abspath(full_out_file)\n (out_dir , out_file) = os.path.split(full_out_file)\n\n write(str(full_out_file), int(self.rate), self.data)\n print('Sounddata written to ' + out_file + ', with a sample rate of ' + str(self.rate))\n print('OutDir: ' + out_dir)\n \n return full_out_file",
"def TestSound():\n SoundsPath = os.path.join(AudioFilesPath, MySet.Sound + \".mp3\")\n Parent.PlaySound(SoundsPath, MySet.Volume*0.01)",
"def save_wavetables(self, path: str, filename_prefix: str = '') -> None:\n for i in range(len(self.wavetables)):\n if not os.path.exists(path):\n os.mkdir(path)\n location = os.path.join(path, filename_prefix + f'{i:02d}.wav')\n wav_file = WavFile(location)\n wav_file.write_samples(self.wavetables[i])",
"def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)",
"def synth_midi(midi_path, output_path, sampling_rate=44100, sf2_path=SOUNDFONT_PATH):\n midi = pretty_midi.PrettyMIDI(midi_path)\n raw_wav = midi.fluidsynth(fs=sampling_rate, sf2_path=sf2_path)\n wave.write(output_path, sampling_rate, raw_wav)",
"def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])",
"def play(sound):\n if SOUNDDIR != \"\":\n call([\"aplay\", SOUNDDIR + sound])",
"async def random_sound(mood=None, blocking=True):\n mood = mood or choice(list(Mood))\n name = choice(sounds[mood])\n print('playing:', mood, name)\n await api.audio.play(name, blocking=blocking)",
"def play_for(sample_wave, ms):\n sound = pygame.sndarray.make_sound(sample_wave)\n sound.play(-1)\n pygame.time.delay(ms)\n sound.stop()",
"def _write(self, source, times=1, file_flag=False, rs_times=None, rs_step=None):\n # if the device isnt initialized properly\n if self._device is None:\n raise SpeakerError\n\n self._duration = None\n self._paused = False\n self._canceled = False\n\n try:\n periodsize = Speaker.PERIOD_SIZE\n\n if file_flag:\n # Open the wav file\n f = wave.open(self._fix_path(source), 'rb') # add error checking here\n\n channels = f.getnchannels()\n framerate = f.getframerate()\n sample_width = f.getsampwidth()\n\n # Read data from file\n data = []\n sample = f.readframes(periodsize)\n while sample:\n data.append(sample)\n sample = f.readframes(periodsize)\n\n # Close file\n f.close()\n else:\n channels = self._channels\n framerate = self.framerate\n sample_width = self.SAMPLE_WIDTH\n\n # Read data from encoded string\n n = len(source)\n step = sample_width * periodsize\n data = [source[i:i+step] for i in range(0, n, step)] # add error checking here\n\n # calculate the duration of the track\n packets = len(data)\n packet_duration = periodsize / self.framerate\n self._duration = (packets * packet_duration)\n\n # Set Device attributes for playback\n self._device.setchannels(channels) # add error checking here\n self._device.setrate(framerate)\n self._device.setperiodsize(periodsize)\n \n # 8bit is unsigned in wav files\n if sample_width == 1:\n self._device.setformat(alsaaudio.PCM_FORMAT_U8)\n # Otherwise we assume signed data, little endian\n elif sample_width == 2:\n self._device.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n elif sample_width == 3:\n self._device.setformat(alsaaudio.PCM_FORMAT_S24_3LE)\n elif sample_width == 4:\n self._device.setformat(alsaaudio.PCM_FORMAT_S32_LE)\n else:\n raise ValueError('Unsupported format')\n\n # Play n times the data\n \n self._play(data, times, rs_times, rs_step) # add error checking here\n except alsaaudio.ALSAAudioError as e:\n print(f\"Caugh is write: {e}\")\n raise SpeakerError\n\n except Exception as e:\n print(f\"Caugh is write: {e}\")\n raise SpeakerError",
"def sound(bool): #py:sound\n RUR._sound_(bool)",
"def write_sound(self, current_sound_conf):\n print \"SOUND as parameter: \", current_sound_conf\n try:\n if current_sound_conf:\n current_sound_str = 'ON'\n else:\n current_sound_str = 'OFF'\n self.store.put(SOUND_STORE, sound=current_sound_str)\n except:\n print \"Error: cannot save game sound configuration!\"",
"def sound(self, frame_rate=400, tone_range=None, amplitude=1E3,\n distance=True, samples=False, fname='song-fHOPs.wav'):\n tone_range = [50, 1000] if tone_range is None else tone_range\n duration = frame_rate / 1000.0\n amp = amplitude\n rate = 44100\n t = np.linspace(0.0, duration, int(duration * rate))\n\n def note(freq):\n \"\"\"Generate a sinusoidal note.\n\n :param float freq: frequency to generate the note from.\n :return: note.\n :rtype: array_like shape (duration * rate,).\n \"\"\"\n data = np.sin(2.0 * np.pi * freq * t) * amp\n return data\n\n scaler = MinMaxScaler(feature_range=tone_range)\n\n if isinstance(samples, bool):\n data = self.data\n elif isinstance(samples, int):\n data = self.sample(samples)\n else:\n data = samples\n\n if distance:\n centroide = self.pca.transform(self.median.reshape(1, -1))\n dists = cdist(centroide, self.pca.transform(data))[0]\n dists = scaler.fit_transform(dists.reshape(-1, 1))\n song = [np.array(note(d)) for d in dists]\n else:\n data = scaler.fit_transform(data)\n song = [np.sum([note(tone) for tone in curve],\n axis=0) for curve in data]\n\n # two byte integers conversion\n wavfile.write(fname, rate,\n np.array(song).astype(np.int16).flatten('C'))",
"def play_sound(self, sound) -> None:\n pass"
] | [
"0.6428006",
"0.63598406",
"0.63356966",
"0.62533575",
"0.613192",
"0.6080928",
"0.6072929",
"0.60464036",
"0.60073996",
"0.6006992",
"0.6005394",
"0.59996164",
"0.5970193",
"0.5965686",
"0.59485584",
"0.59431666",
"0.59283555",
"0.59256744",
"0.59218687",
"0.5920869",
"0.58924",
"0.5883333",
"0.5854458",
"0.5836992",
"0.58284736",
"0.58221966",
"0.5821302",
"0.5799694",
"0.57651",
"0.57336634"
] | 0.8022576 | 0 |
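Editor's note: to make the write_sound row above concrete, the helper receives a Property that is either a single value or a block of children, and emits a lone "wave" line or a "rndwave" block accordingly, packing each referenced file. The runnable stand-in below reproduces that logic for a demo; FakeProperty and the SOUND_CHARS string are assumptions made for this sketch, while the real code uses srctools Property objects.

import io

SOUND_CHARS = '*#@><^)}$!?'  # assumption: typical Source sound-character prefixes


class FakeProperty:
    """Minimal stand-in for the Property interface the helper expects."""

    def __init__(self, value=None, children=()):
        self.value = value
        self._children = list(children)

    def has_children(self):
        return bool(self._children)

    def __iter__(self):
        return iter(self._children)


def write_sound(file, snds, pack_list, snd_prefix='*'):
    # Same logic as the row above, minus the type annotation, so the demo is self-contained.
    if snds.has_children():
        file.write('"rndwave"\n\t{\n')
        for snd in snds:
            file.write('\t"wave" "%s%s"\n' % (snd_prefix, snd.value.lstrip(SOUND_CHARS)))
            pack_list.add('sound/' + snd.value.casefold())
        file.write('\t}\n')
    else:
        file.write('"wave" "%s%s"\n' % (snd_prefix, snds.value.lstrip(SOUND_CHARS)))
        pack_list.add('sound/' + snds.value.casefold())


buf, packed = io.StringIO(), set()
tracks = FakeProperty(children=[FakeProperty('music/track01.wav'), FakeProperty('music/track02.wav')])
write_sound(buf, tracks, packed, snd_prefix='*')
print(buf.getvalue())   # a "rndwave" block with two "wave" lines
print(sorted(packed))   # ['sound/music/track01.wav', 'sound/music/track02.wav']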
Generate the names of files to inject, if they exist.. | def inject_files():
for filename, arcname in INJECT_FILES.items():
filename = os.path.join('bee2', 'inject', filename)
if os.path.exists(filename):
yield filename, arcname
# Additionally add files set in the config.
for prop in CONF.find_children('InjectFiles'):
filename = os.path.join('bee2', 'inject', prop.real_name)
if os.path.exists(filename):
yield filename, prop.value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def processed_file_names(self):\n if self.force_reprocess == True:\n self.force_reprocess = False\n return 'reprocess.pt'\n \n ''' HR 01/06/22 Workaround to avoid FileNotFoundError '''\n print('self.processed_dir:', self.processed_dir)\n # folder,file = os.path.split(self.processed_dir)\n folder = self.processed_dir\n if not os.path.isdir(folder):\n print(' Making folder', folder)\n os.makedirs(folder)\n \n processedfiles = [f for f in os.listdir(self.processed_dir) if os.path.isfile(\n os.path.join(self.processed_dir, f))]\n if 'pre_filter.pt' in processedfiles:\n processedfiles.remove('pre_filter.pt')\n if 'pre_transform.pt' in processedfiles:\n processedfiles.remove('pre_transform.pt')\n # 'not_implimented.pt' #[f'data_{i}.pt' for i in list(self.data.index)]\n return processedfiles",
"def get_tweet_file_names(self) -> None:\n self.list_of_files = os.listdir(self.input_file_path)\n no_of_files = len(self.list_of_files)\n for iterator in range(0, no_of_files):\n self.list_of_files[iterator] = self.input_file_path + \"\\\\\" + self.list_of_files[iterator]\n print(\"no of json files \",no_of_files)",
"def discover(self):\n ids = []\n for f in os.listdir(self.dirname):\n if self.file_prefix in f:\n ids.append(self.inv_filename(f))\n return sorted(ids)",
"def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)",
"def prep_files(app):\n smali_paths = []\n start = time.time()\n \n for root, dirs, files in os.walk(app, topdown=False):\n for name in files:\n if name[-6:] == \".smali\":\n smali_paths.append(str(os.path.join(root, name)))\n \n return smali_paths",
"def collected_filename(cfg, collect_dir, i=None):\n if i is not None:\n file = cfg[\"files\"][i]\n else:\n file = cfg[\"file\"]\n ext = path.splitext(file)[1]\n name = cfg[\"id\"]\n if i is not None:\n name += \"_\" + str(i)\n return path.join(collect_dir, name + ext)",
"def get_files(self):\r\n for filename in self.generated_files:\r\n path = os.path.join(CONFIGURATION.source_messages_dir, filename)\r\n exists = os.path.exists(path)\r\n self.assertTrue(exists, msg='Missing file: %s' % filename)\r\n if exists:\r\n yield path",
"def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))",
"def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)",
"async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()",
"def filenames(self):\n pass",
"def filename(i):\n rand_name = os.path.join(os.getcwd(), \"input-%d.txt\" % i)\n ref_name = os.path.join(os.getcwd(), \"input-%d.ref\" % i)\n return rand_name, ref_name",
"def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return",
"def make_files(self):\n return []",
"def pick_names(root_dir):\n yield from glob.glob(root_dir + '*.hpp')\n yield from glob.glob(root_dir + '*.cpp')\n yield from glob.glob(root_dir + '*/*.hpp')\n yield from glob.glob(root_dir + '*/*.cpp')",
"def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]",
"def genPathCopasi(self,nameBase,suffix=\".cps\"):\n i=0\n nameFree=False\n while not nameFree:\n copasi_filename = os.path.join(self.run_dir,nameBase+\n str(i)+suffix)\n nameFree = not os.path.exists(copasi_filename)\n i=i+1\n return copasi_filename",
"def _get_file_names():\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 6)]\n file_names['test'] = ['test_batch']\n\n return file_names",
"def process(filename, exclude_dirs=['unittest','test','site-packages']):\n print(\"Generating {}\".format(filename))\n nb = 0\n nb_err = 0\n _main_root = os.path.dirname(filename)\n _VFS = {}\n for _mydir in (\"libs\", \"Lib\"):\n for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):\n #if _root.endswith('lib_migration'):\n _flag=False\n for _exclude in exclude_dirs:\n if _exclude in _root: #_root.endswith(_exclude):\n _flag=True\n continue\n if _flag:\n continue # skip these modules\n if '__pycache__' in _root:\n continue\n nb += 1\n\n for _file in _files:\n _ext = os.path.splitext(_file)[1]\n if _ext not in ('.js', '.py'):\n continue\n if re.match(r'^module\\d+\\..*$', _file):\n continue\n nb += 1\n\n file_name = os.path.join(_root, _file)\n _data = open(file_name, encoding='utf-8').read()\n \n if _ext == '.py':\n _data = python_minifier.minify(_data, preserve_lines=True)\n\n _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')\n _vfs_filename = _vfs_filename.replace(\"\\\\\", \"/\")\n\n if _vfs_filename.startswith('/libs/crypto_js/rollups/'):\n if _file not in ('md5.js', 'sha1.js', 'sha3.js',\n 'sha224.js', 'sha384.js', 'sha512.js'):\n continue\n\n mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')\n mod_name, ext = os.path.splitext(mod_name)\n is_package = mod_name.endswith('__init__')\n if is_package:\n mod_name = mod_name[:-9]\n _VFS[mod_name] = [ext, _data, 1]\n else:\n _VFS[mod_name] = [ext, _data]\n print((\"adding %s %s\" % (mod_name, _vfs_filename)))\n print('%s files, %s errors' % (nb, nb_err))\n with open(filename, \"w\") as file_to_write_VFS:\n file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\\n')\n file_to_write_VFS.write('__BRYTHON__.VFS=%s;\\n\\n' % json.dumps(_VFS))",
"def processed_file_names(self):\n # For 'trainval', we use files from 'train' and 'val' to save\n # memory\n if self.stage == 'trainval' and self.val_mixed_in_train:\n return [\n osp.join('train', self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n if self.stage == 'trainval':\n return [\n osp.join(s, self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n return [\n osp.join(self.stage, self.pre_transform_hash, f'{w}.h5')\n for w in self.cloud_ids]",
"def createFileNames(nFileNames, seqPrefix):\n nameList = []\n nameList = [seqPrefix+str(i)+\".txt\" for i in range(0, nFileNames)]\n return nameList",
"def assemble_files():\r\n path = os.path.expanduser(sys.argv[1])\r\n if os.path.isdir(path):\r\n file_root = path + \"/\"\r\n for file in os.listdir(path):\r\n filename = os.path.splitext(file)\r\n if filename[1] == \".asm\":\r\n hack_file_name = file_root + filename[0] + \".hack\"\r\n assemble_file(file_root + file, hack_file_name)\r\n else:\r\n filename = os.path.splitext(path)\r\n hack_file_name = filename[0] + \".hack\"\r\n assemble_file(path, hack_file_name)",
"def gather_initial_fullnames():\n\n infullnames = []\n for (dirpath, _, filenames) in os.walk('.'):\n dpath = dirpath[2:]\n if dpath:\n dpath += '/'\n for fname in filenames:\n infullnames.append('%s%s' % (dpath, fname))\n\n if miscutils.fwdebug_check(6, 'PFWRUNJOB_DEBUG'):\n miscutils.fwdebug_print(\"initial infullnames=%s\" % infullnames)\n return infullnames",
"def get_file_list() -> List[str]:\n filenames = []\n os.makedirs(\"sequence\", exist_ok=True)\n for file in glob.glob(\"sequence/*.smp\"):\n filenames.append(file.replace(\"sequence/\", \"\"))\n return filenames",
"def _find_named_files(self):\n for name, description in self.named_files.iteritems():\n name = name.format(job_name=self.job_name)\n f_path = '{}/{}'.format(self.rism3d_folder, name)\n if os.path.isfile(f_path):\n self.file_path_dic[description] = f_path\n else:\n self._not_found_error(f_path)",
"def buildListOfFiles(searchGlob):\n return [fpath for fpath in glob2.iglob(searchGlob) if os.path.isfile(fpath)]",
"def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities",
"def make_dummy_files(paths):\n for p in paths:\n make_dummy_file(p)",
"def build(self) -> None:\n def do_process(fname) -> bool:\n for sfx in skip_suffixes:\n if fname.endswith(sfx):\n return False\n return True\n\n for dirpath, _, fnames in os.walk(self.template_dir):\n for fname in fnames:\n if do_process(fname):\n self.process(dirpath, fname)",
"def gen_find(filepat, top):\n for path, dir_list, file_list in os.walk(top):\n for name in fnmatch.filter(file_list, filepat):\n yield os.path.join(path, name)"
] | [
"0.6215151",
"0.6173545",
"0.6165678",
"0.6132175",
"0.60765666",
"0.60045236",
"0.59673595",
"0.5943801",
"0.5906433",
"0.5898301",
"0.5871709",
"0.5870495",
"0.5845655",
"0.58379316",
"0.5836181",
"0.583417",
"0.58176804",
"0.58070236",
"0.57988113",
"0.5745292",
"0.57418555",
"0.57048243",
"0.5697326",
"0.56918204",
"0.56887746",
"0.5685817",
"0.5682319",
"0.5680787",
"0.56605625",
"0.56593716"
] | 0.6815235 | 0 |
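Editor's note: the inject_files generator in the row above yields (source path, destination name) pairs for files present under bee2/inject. The sketch below shows one plausible way to consume such pairs, writing them into a zip; the INJECT_FILES table and the zip destination are invented for the illustration, and the config-driven second loop of the original is omitted.

import os
import zipfile

# Hypothetical stand-in for the INJECT_FILES mapping used by the code above.
# The source filenames are ones written elsewhere in this dataset's code rows.
INJECT_FILES = {
    'music_script.txt': 'scripts/bee2_generated_music.txt',
    'soundscript_manifest.txt': 'scripts/game_sounds_manifest.txt',
}


def inject_files(inject_dir=os.path.join('bee2', 'inject')):
    """Yield (real path, archive name) pairs for files that actually exist."""
    for filename, arcname in INJECT_FILES.items():
        path = os.path.join(inject_dir, filename)
        if os.path.exists(path):
            yield path, arcname


def pack_injected(zip_path='injected_files.zip'):
    """Write every injected file into a zip under its destination name."""
    with zipfile.ZipFile(zip_path, 'w') as zf:
        for path, arcname in inject_files():
            zf.write(path, arcname)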
Find candidate screenshots to overwrite. | def find_screenshots():
# Inside SCREENSHOT_DIR, there should be 1 folder with a
# random name which contains the user's puzzles. Just
# attempt to modify a screenshot in each of the directories
# in the folder.
for folder in os.listdir(SCREENSHOT_DIR):
full_path = os.path.join(SCREENSHOT_DIR, folder)
if os.path.isdir(full_path):
# The screenshot to modify is untitled.jpg
screenshot = os.path.join(full_path, 'untitled.jpg')
if os.path.isfile(screenshot):
yield screenshot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grab_heroes_pool_images(screenshot):\n\n #screenshot = WindowManager.preprocess_image(screenshot)\n return [screenshot.crop((509, 280, 820, 790)),\n screenshot.crop((820, 280, 1131, 790)),\n screenshot.crop((1131, 280, 1442, 790)),\n screenshot.crop((1442, 280, 1753, 790)),\n screenshot.crop((1753, 280, 2064, 790))]",
"def grab_heroes_in_hand_upgrade_images(screenshot):\n\n return [screenshot.crop((550, 1070, 731, 1140)),\n screenshot.crop((731, 1070, 912, 1140)),\n screenshot.crop((912, 1070, 1093, 1140)),\n screenshot.crop((1093, 1070, 1274, 1140)),\n screenshot.crop((1274, 1070, 1455, 1140)),\n screenshot.crop((1455, 1070, 1636, 1140)),\n screenshot.crop((1636, 1070, 1817, 1140)),\n screenshot.crop((1817, 1070, 1998, 1140))]",
"def check_images():\n\n print(f'Looking for duplicate images...')\n\n for image in images_in_directory:\n duplicate = check_image_for_duplicates(image)\n\n if (duplicate):\n print(f'Found {duplicate} to be a duplicate image of: {image}')\n remove_image(duplicate)\n pass",
"def screenshots(self, screenshots):\n self._screenshots = screenshots",
"def grab_heroes_on_board_images(screenshot):\n\n row3 = [screenshot.crop((609, 897, 790, 1082)),\n screenshot.crop((774, 897, 955, 1082)),\n screenshot.crop((939, 897, 1120, 1082)),\n screenshot.crop((1104, 897, 1285, 1082)),\n screenshot.crop((1269, 897, 1450, 1082)),\n screenshot.crop((1434, 897, 1615, 1082)),\n screenshot.crop((1599, 897, 1780, 1082)),\n screenshot.crop((1764, 897, 1945, 1082))]\n\n row2 = [screenshot.crop((633, 764, 808, 941)),\n screenshot.crop((792, 764, 967, 941)),\n screenshot.crop((951, 764, 1126, 941)),\n screenshot.crop((1110, 764, 1285, 941)),\n screenshot.crop((1269, 764, 1444, 941)),\n screenshot.crop((1428, 764, 1603, 941)),\n screenshot.crop((1587, 764, 1762, 941)),\n screenshot.crop((1746, 764, 1921, 941))]\n\n row1 = [screenshot.crop((654, 640, 822, 808)),\n screenshot.crop((808, 640, 976, 808)),\n screenshot.crop((962, 640, 1130, 808)),\n screenshot.crop((1116, 640, 1284, 808)),\n screenshot.crop((1270, 640, 1438, 808)),\n screenshot.crop((1424, 640, 1592, 808)),\n screenshot.crop((1578, 640, 1746, 808)),\n screenshot.crop((1732, 640, 1900, 808))]\n\n row0 = [screenshot.crop((664, 524, 834, 684)),\n screenshot.crop((820, 524, 986, 684)),\n screenshot.crop((970, 524, 1136, 684)),\n screenshot.crop((1120, 524, 1286, 684)),\n screenshot.crop((1270, 524, 1436, 684)),\n screenshot.crop((1420, 524, 1586, 684)),\n screenshot.crop((1570, 524, 1736, 684)),\n screenshot.crop((1720, 524, 1886, 684))]\n\n return [row0, row1, row2, row3]",
"def mod_screenshots():\n mod_type = CONF['screenshot_type', 'PETI'].lower()\n\n if mod_type == 'cust':\n LOGGER.info('Using custom screenshot!')\n scr_loc = CONF['screenshot', '']\n elif mod_type == 'auto':\n LOGGER.info('Using automatic screenshot!')\n scr_loc = None\n # The automatic screenshots are found at this location:\n auto_path = os.path.join(\n '..',\n GAME_FOLDER.get(CONF['game_id', ''], 'portal2'),\n 'screenshots'\n )\n # We need to find the most recent one. If it's named\n # \"previewcomplete\", we want to ignore it - it's a flag\n # to indicate the map was playtested correctly.\n try:\n screens = [\n os.path.join(auto_path, path)\n for path in\n os.listdir(auto_path)\n ]\n except FileNotFoundError:\n # The screenshot folder doesn't exist!\n screens = []\n screens.sort(\n key=os.path.getmtime,\n reverse=True,\n # Go from most recent to least\n )\n playtested = False\n for scr_shot in screens:\n filename = os.path.basename(scr_shot)\n if filename.startswith('bee2_playtest_flag'):\n # Previewcomplete is a flag to indicate the map's\n # been playtested. It must be newer than the screenshot\n playtested = True\n continue\n elif filename.startswith('bee2_screenshot'):\n continue # Ignore other screenshots\n\n # We have a screenshot. Check to see if it's\n # not too old. (Old is > 2 hours)\n date = datetime.fromtimestamp(\n os.path.getmtime(scr_shot)\n )\n diff = datetime.now() - date\n if diff.total_seconds() > 2 * 3600:\n LOGGER.info(\n 'Screenshot \"{scr}\" too old ({diff!s})',\n scr=scr_shot,\n diff=diff,\n )\n continue\n\n # If we got here, it's a good screenshot!\n LOGGER.info('Chosen \"{}\"', scr_shot)\n LOGGER.info('Map Playtested: {}', playtested)\n scr_loc = scr_shot\n break\n else:\n # If we get to the end, we failed to find an automatic\n # screenshot!\n LOGGER.info('No Auto Screenshot found!')\n mod_type = 'peti' # Suppress the \"None not found\" error\n\n if srctools.conv_bool(CONF['clean_screenshots', '0']):\n LOGGER.info('Cleaning up screenshots...')\n # Clean up this folder - otherwise users will get thousands of\n # pics in there!\n for screen in screens:\n if screen != scr_loc:\n os.remove(screen)\n LOGGER.info('Done!')\n else:\n # PeTI type, or something else\n scr_loc = None\n\n if scr_loc is not None and os.path.isfile(scr_loc):\n # We should use a screenshot!\n for screen in find_screenshots():\n LOGGER.info('Replacing \"{}\"...', screen)\n # Allow us to edit the file...\n utils.unset_readonly(screen)\n shutil.copy(scr_loc, screen)\n # Make the screenshot readonly, so P2 can't replace it.\n # Then it'll use our own\n utils.set_readonly(screen)\n\n else:\n if mod_type != 'peti':\n # Error if we were looking for a screenshot\n LOGGER.warning('\"{}\" not found!', scr_loc)\n LOGGER.info('Using PeTI screenshot!')\n for screen in find_screenshots():\n # Make the screenshot writeable, so P2 will replace it\n LOGGER.info('Making \"{}\" replaceable...', screen)\n utils.unset_readonly(screen)",
"def compare_picture_auto_collect(screenshot_file, template_file, auto_fix=False):\r\n if not os.path.exists(screenshot_file):\r\n raise Exception(\"Can not find screenshot_file:{}\".format(screenshot_file))\r\n\r\n if not os.path.exists(template_file):\r\n print(\"can not find template file:{} ,create a new one\".format(template_file))\r\n dirs = os.path.dirname(template_file)\r\n if not os.path.exists(dirs):\r\n os.makedirs(dirs)\r\n shutil.copyfile(screenshot_file, template_file)\r\n path, ext = os.path.splitext(template_file)\r\n shutil.copyfile(screenshot_file, \"{}_auto{}\".format(path, ext))\r\n return compare_picture_list(screenshot_file, template_file, auto_fix)",
"def grab_big_hp_images(screenshot):\n return [screenshot.crop((123, 160, 400, 254)),\n screenshot.crop((123, 268, 400, 362)),\n screenshot.crop((123, 376, 400, 470)),\n screenshot.crop((123, 484, 400, 578)),\n screenshot.crop((123, 592, 400, 686)),\n screenshot.crop((123, 700, 400, 794)),\n screenshot.crop((123, 808, 400, 902)),\n screenshot.crop((123, 916, 400, 1010))]",
"def selectPatches(originDir, destinationDir, indices):\n for i in indices :\n filePath = originDir + str(i).rstrip() + \".png\"\n\n if not os.path.isfile(filePath):\n print(\"file does not exist\")\n shutil.copy2(filePath,destinationDir)\n print(\"Done\")",
"def save_unique_image():\r\n global folder_name\r\n filelist = [file for file in os.listdir('temp') if file.endswith('.png')]\r\n\r\n if filelist:\r\n for image_path in filelist:\r\n found = 0\r\n img_to_del = Image.open(\"temp/\" + image_path)\r\n if not get_immediate_subdirectories():\r\n found = 1\r\n os.makedirs('detected_faces/1/')\r\n img_to_del.save('detected_faces/1/'+ image_path)\r\n os.remove(os.path.join(temp_path, image_path))\r\n folder_name = 1\r\n else:\r\n for folder in get_immediate_subdirectories():\r\n folder_filelist = [file for file in os.listdir(\"detected_faces/\" + folder) if\r\n file.endswith('.png')]\r\n count = len(folder_filelist)\r\n file = folder_filelist[0]\r\n img_to_compare = Image.open(\"detected_faces/\" + folder + \"/\" + file)\r\n if img_to_del.size > img_to_compare.size:\r\n temp_image_resized = img_to_del.resize(img_to_compare.size, Image.ANTIALIAS)\r\n index = get_ssim(temp_image_resized, img_to_compare)\r\n elif img_to_del.size < img_to_compare.size:\r\n img_to_compare = img_to_compare.resize(img_to_del.size, Image.ANTIALIAS)\r\n index = get_ssim(img_to_del, img_to_compare)\r\n else:\r\n index = get_ssim(img_to_del, img_to_compare)\r\n if index > min_ssim_index_val:\r\n found = 1\r\n if count < 5:\r\n img_to_del.save(pathname + \"/\" + folder + \"/\" + image_path)\r\n print image_path\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))\r\n if found == 0:\r\n folder_name += 1\r\n os.makedirs('detected_faces/' + str(folder_name))\r\n img_to_del.save(pathname + \"/\" + str(folder_name) + \"/\" + image_path)\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))",
"def grab_hp_images(screenshot):\n return [screenshot.crop((230, 208, 400, 254)),\n screenshot.crop((230, 316, 400, 362)),\n screenshot.crop((230, 424, 400, 470)),\n screenshot.crop((230, 532, 400, 578)),\n screenshot.crop((230, 640, 400, 686)),\n screenshot.crop((230, 748, 400, 794)),\n screenshot.crop((230, 856, 400, 902)),\n screenshot.crop((230, 964, 400, 1010))]",
"def screenshots(self):\n return self._screenshots",
"def rm_duplicates(self):\n # get uniq representation of existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)",
"def grab_heroes_in_hand_images(screenshot):\n\n return [screenshot.crop((550, 1140, 731, 1325)),\n screenshot.crop((731, 1140, 912, 1325)),\n screenshot.crop((912, 1140, 1093, 1325)),\n screenshot.crop((1093, 1140, 1274, 1325)),\n screenshot.crop((1274, 1140, 1455, 1325)),\n screenshot.crop((1455, 1140, 1636, 1325)),\n screenshot.crop((1636, 1140, 1817, 1325)),\n screenshot.crop((1817, 1140, 1998, 1325))]",
"def find_duplicates():\n return AppServer.service.find_duplicated_files()",
"def find_uglies():\n for file_type in ['neg']:\n for img in os.listdir(file_type):\n for ugly in os.listdir('uglies'):\n try:\n current_image_path = str(file_type) + '/'+str(img)\n ugly = cv2.imread('uglies/' + str(ugly))\n question = cv2.imread(current_image_path)\n if ugly.shape == question.shape and not(np.bitwise_xor(ugly, question).any()):\n print('That is one ugly pic! Deleting!')\n print(current_image_path)\n os.remove(current_image_path)\n except Exception as e:\n print(str(e))",
"def analyze(self, scratch):\n for sprite in self.iter_sprites(scratch):\n for default in self.default_names:\n if default in sprite.name:\n self.total_default += 1\n self.list_default.append(sprite.name)",
"def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False",
"def extract_patches(image_list, mask_src, image_src, mask_dst, image_dst, patch_size):\n class_counts = defaultdict(lambda: 0)\n skipped = 0\n total = 0\n for im in tqdm(image_list):\n img = cv2.imread(os.path.join(image_src, im))\n msk = cv2.imread(os.path.join(mask_src, im), 0)\n \n assert (img.shape[0] == msk.shape[0]) \\\n and (img.shape[1] == msk.shape[1]), \"Mismatch!\"\n\n img_patches = patchify(img, (patch_size, patch_size, 3), step=patch_size)\n msk_patches = patchify(msk, (patch_size, patch_size), step=patch_size)\n img_patches = img_patches.reshape((-1, patch_size, patch_size, 3))\n msk_patches = msk_patches.reshape((-1, patch_size, patch_size))\n # Step = 256 for patch size means no overlap\n for i in range(img_patches.shape[0]):\n # Replace class labels\n mask_patch = replace_classes(msk_patches[i])\n unique, counts = np.unique(mask_patch, return_counts=True)\n # If outside of RoI takes > 90% and there is only 1 class, ignore the patch.\n outside = np.mean(mask_patch == 0) > 0.9\n if outside and (len(unique) < 2):\n skipped += 1\n continue\n for x, y in enumerate(unique):\n class_counts[y] += counts[x].item()\n img_patch = img_patches[i]\n filename = im.split(\".png\")[0] + \"_\" + str(i) + \".png\"\n cv2.imwrite(os.path.join(image_dst, filename), img_patch)\n cv2.imwrite(os.path.join(mask_dst, filename), mask_patch)\n total += 1\n print('Skipped: {} / {}'.format(skipped, total))\n return class_counts",
"def figure_roi_inspect_all(self):\n for roiNumber in range(len(self.rois)):\n self.figure_roi_inspect(roiNumber,saveAs=\"roi_%02d.png\"%roiNumber)",
"def configure_screenshots(scenario):\r\n world.auto_capture_screenshots = False",
"def analyze(self, scratch):\n for background in scratch.stage.backgrounds:\n for default in self.default_names:\n if default in background.name:\n self.total_default += 1\n self.list_default.append(background.name)",
"def test_screenshots_generated():\n with temporary_dir() as output_dir:\n output_dir = Path(output_dir)\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_screenshots.xml\",\n output_dir / \"output.xml\",\n )\n open(output_dir / \"selenium-screenshot-1.png\", mode=\"w+\")\n open(output_dir / \"selenium-screenshot-2.png\", mode=\"w+\")\n\n flowtask = FlowTaskFactory()\n robot_importer.import_robot_test_results(flowtask, output_dir)\n\n # output.xml asset created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-output\").count()\n # suite setup screenshot assets created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-screenshot\").count()\n # No screenshots created for 'Via API' test\n tr_method = models.TestMethod.objects.get(name=\"Via API\")\n test_api = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 0 == test_api.assets.count()\n\n # One screenshot created for 'Via UI' test\n tr_method = models.TestMethod.objects.get(name=\"Via UI\")\n test_ui = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 1 == test_ui.assets.count()",
"def find_all_faces_in_multiple_img(img_dir_path, detector, img_size, dst_path):\r\n\r\n number_of_faces_already_found = count_files_in_one_directory(dst_path)\r\n print('number_of_faces_already_found', number_of_faces_already_found)\r\n for filename in os.listdir(img_dir_path):\r\n try:\r\n # When the user decides to add new data to existing one, no need to deal again (refind faces) with the old data\r\n if list(map(int, re.findall(r'\\d+', filename)))[0] < number_of_faces_already_found:\r\n continue\r\n img_path = os.path.join(img_dir_path, filename)\r\n print(filename + ': IN PROGRESS')\r\n detection_status = find_all_faces_in_one_img(\r\n img_path, detector, img_size, dst_path)\r\n print(filename + ': {}\\n'.format(detection_status.upper()))\r\n except:\r\n continue",
"def test_full_resize(self):\n number_of_pixels = 300\n destination = base_path +'/test_data/rendering_tests/resized_images/'\n source_folder = base_path + '/test_data/rendering_tests/filter_database/'\n\n\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n\n self.assertEqual(0, len(os.listdir(destination)))\n rb.find_all_files(number_of_pixels,source_folder, destination)\n self.assertEqual(6, len(os.listdir(destination)))\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination,the_file)\n with Image.open(file_path) as f:\n self.assertNotEqual(number_of_pixels+5, f.size[0])\n self.assertNotEqual(number_of_pixels+5, f.size[1])\n # the above checks that the size does not vary as needed\n # probably not necessary\n self.assertEqual(number_of_pixels, f.size[0])\n self.assertEqual(number_of_pixels, f.size[1])",
"def scan(self):\n picker_photos = self._order_picked(self._scan())\n self._picked_file_paths = [\n picker_photo.filepath for picker_photo in picker_photos\n ]",
"def _dump_image(self):\n if not self._current_id == len(self._img_ids):\n warnings.warn(\n 'Recorded {} out of {} validation images, incomplete results'.format(\n self._current_id, len(self._img_ids)))\n try:\n for im_name, im in self._panoptic_images.items():\n cv2.imwrite(osp.join(self._save_imgpath, im_name), im)\n except IOError as e:\n raise RuntimeError(\"Unable to dump images, ignored. What(): {}\".format(str(e)))",
"def save_step_2(imgs, match_list, output_path=\"./output/step2\"):\n # ... your code here ...\n for i in range(len(imgs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][2]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+str(match_list[i][1])+\"_\"+name2+\"_\"+str(match_list[i][3])+\"_\"+str(match_list[i][4])+\".jpg\", imgs[i])",
"def stitch_images(self):\n stitched_folder_name = self.parent_folder + 'stitched'\n print(\"Stitching images in:\")\n print(self.folder_list)\n print(\"Storing in: \" + str(stitched_folder_name))\n\n try:\n print(\"Making dir \" + str(stitched_folder_name) + \" for stitching\")\n os.mkdir(stitched_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this stitching??\")\n return\n\n photo_list = self.get_photo_list(self.parent_folder + '/' + self.folder_list[0])\n # get photo sizes\n print(self.parent_folder + '/' + self.folder_list[0] + '/' + photo_list[0])\n size_photo = cv2.imread(self.parent_folder + '/' + self.folder_list[0] +\n '/' + photo_list[0], cv2.IMREAD_ANYDEPTH)\n photo_height, photo_width = np.shape(size_photo)\n stitched_height = photo_height * 2\n stitched_width = photo_width * 4\n\n for photo in photo_list:\n stitched_photo = np.full((stitched_height, stitched_width), 0)\n\n for i, folder in enumerate(self.folder_list):\n print(i)\n print(folder)\n print(self.parent_folder + folder + '/' + photo)\n\n stitched_photo[(int((float(i) / 4.0)) * photo_height):(int(((float(i) / 4.0) + 1)) * photo_height),\n (int(i % 4) * photo_width):((int((i % 4) + 1)) * photo_width)] \\\n = cv2.imread(self.parent_folder + '/' + folder + '/' + photo, cv2.IMREAD_ANYDEPTH)\n\n stitched_photo = stitched_photo.astype(np.uint16)\n cv2.imwrite(stitched_folder_name + '/' + photo, stitched_photo, [cv2.IMWRITE_PNG_COMPRESSION, 0])\n\n return stitched_folder_name",
"def match_histograms(src_path, dst_path, size=128, step_size=128, *, reference_path):\n with rasterio.open(src_path) as src:\n profile = src.profile.copy()\n windows = list(\n sliding_windows(\n (size, size), (step_size, step_size), src.width, src.height, whole=False\n )\n )\n\n with rasterio.open(reference_path) as ref:\n with rasterio.open(dst_path, \"w\", **profile) as dst:\n for c, (win, (i, j)) in tqdm(list(enumerate(windows))):\n _logger.debug(\"%s %s\", win, (i, j))\n\n img = read_window(src, win)\n ref_img = read_window(ref, win)\n\n matched_img = exposure.match_histograms(\n img, ref_img, multichannel=True\n )\n write_window(matched_img, dst, win)"
] | [
"0.56268287",
"0.55836725",
"0.5466227",
"0.54593205",
"0.54427487",
"0.5429798",
"0.5360617",
"0.5355129",
"0.533144",
"0.52600294",
"0.52395785",
"0.516024",
"0.513637",
"0.5128769",
"0.51157874",
"0.5084877",
"0.50637954",
"0.5062044",
"0.50510454",
"0.5047947",
"0.5047658",
"0.5046469",
"0.50156593",
"0.5001473",
"0.4999575",
"0.493691",
"0.48969343",
"0.4881541",
"0.48794788",
"0.48767737"
] | 0.66657865 | 0 |
Returns the UofT Graduate GPA for a given grade. | def grade_to_gpa(grade):
letter_grade = ""
gpa = 0.0
if type(grade) is str:
accepted_values = ["A+", "A", "A-", "B+", "B", "B-", "FZ"]
# check that the grade is one of the accepted values
if grade in accepted_values:
# assign grade to letter_grade
letter_grade = grade
#If grade input is a string, but not an accepted value, raise a ValueError
else:
raise ValueError("Incorrect value. Grade must be an accepted letter grade.")
elif type(grade) is int:
# check that grade is in the accepted range 0 to 100
if 0 <= grade <= 100:
# convert the numeric grade to a letter grade
mark_to_letter = grade
# assign the value to letter_grade
# hint: letter_grade = mark_to_letter(grade)
if mark_to_letter >= 90:
letter_grade = "A+"
elif mark_to_letter >= 85:
letter_grade = "A"
elif mark_to_letter >= 80:
letter_grade = "A-"
elif mark_to_letter >= 77:
letter_grade = "B+"
elif mark_to_letter >= 73:
letter_grade = "B"
elif mark_to_letter >= 70:
letter_grade = "B-"
else:
letter_grade = "FZ"
#If grade input is not in accepted range, raise ValueError
else:
raise ValueError("Incorrect value. Grade must be in the accepted range of 0 to 100.")
else:
# raise a TypeError exception
raise TypeError("Invalid type passed as parameter")
# write a long if-statement to convert letter_grade
# assign the value to gpa
if letter_grade == "A+":
gpa = 4.0
if letter_grade == "A":
gpa = 4.0
if letter_grade == "A-":
gpa = 3.7
if letter_grade == "B+":
gpa = 3.3
if letter_grade == "B":
gpa = 3.0
if letter_grade == "B-":
gpa = 2.7
if letter_grade == "FZ":
gpa = 0.0
#Return the gpa of the grade
return gpa | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_gpa(current_bucket_gpa):\r\n\r\n computed_grade = re.findall(r\"\\d+[.]?\\d*\", current_bucket_gpa)\r\n if len(computed_grade) > 0:\r\n computed_grade = computed_grade[0]\r\n if float(computed_grade) > 10:\r\n computed_grade = float(computed_grade[0]) / 10.0\r\n return round(float(computed_grade), 2)\r\n return 0.0",
"def get_gpa(scores):\n subjects_gpas = []\n for score in scores:\n subjects_gpas.append(calculate_gpa(score))\n gpa = get_average(subjects_gpas)\n return gpa",
"def calculate_gpa(self):\n cur_node = self.head\n gpa = 0\n total_credits = 0\n while cur_node is not None:\n gpa += cur_node.data.grade() * cur_node.data.credit_hr()\n total_credits += cur_node.data.credit_hr()\n cur_node = cur_node.next\n if total_credits == 0:\n return 0\n return gpa / total_credits",
"def get_grade(self):\n return self.__grade_value",
"def get_grade(self) -> int :\n return self.grade",
"def gpa(self, new_gpa):\n if self.MIN_GPA <= new_gpa <= self.MAX_GPA:\n self._gpa = new_gpa\n else:\n raise ValueError",
"def round_grade(grade):\n\t# only round grade if the input is a float\n\tif type(grade) != str:\n\t\trounded_grade = np.floor(grade/10)\n\t\treturn rounded_grade\n\t# otherwise return the original grade -> could be string \"Lunch\" or \"Period\"\n\telse:\n\t\treturn grade",
"def grade_conversion(grade):\n grade_converter = {\"A\": 4.00, \"A-\":3.67, \"B+\": 3.33, \"B\": 3.00, \"B-\": 2.67, \"C+\": 2.33, \"C\": 2.00, \"C-\": 1.67, \"D\": 1.00, \"F\": 0.0}\n while True:\n for val, val2 in grade_converter.items():\n if grade == val:\n return val2",
"def calculate_gpa(score):\n if score < 60:\n return 0\n elif 60 <= score < 70:\n return 1\n elif 70 <= score < 80:\n return 2\n elif 80 <= score < 90:\n return 3\n elif score >= 90:\n return 4",
"def add_percentage(grade):\n\tif type(grade) == float:\n\t\tperc_grade = str(grade) + '%'\n\t\treturn perc_grade\n\telse:\n\t\treturn grade",
"def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))",
"def calc_grade(self, average):\n if 95 <= average:\n return 'S'\n elif 90 <= average:\n return 'A'\n elif 80 <= average:\n return 'B'\n elif 70 <= average:\n return 'C'\n elif 60 <= average:\n return 'D'\n else:\n return 'F'",
"def gpa_calculator():\n gpa = 0.0\n grade_array = []\n credit_array = []\n grade_converter = {\"A\": 4.00, \"A-\":3.67, \"B+\": 3.33, \"B\": 3.00, \"B-\": 2.67, \"C+\": 2.33, \"C\": 2.00, \"C-\": 1.67, \"D\": 1.00, \"F\": 0.0}\n with open('full_courses.json', 'r') as fp:\n full_courses = json.load(fp)\n for val in full_courses.values():\n if val[2] == 'C':\n credit_array.append(val[0])\n for i, val2 in grade_converter.items():\n if val[1] == i:\n grade_array.append(val2)\n final_array = [val*val1 for val,val1 in zip(grade_array, credit_array)]\n gpa = round(sum(final_array)/sum(credit_array),2)\n print(\"GPA CALCULATED AS: \"+str(gpa))\n return gpa",
"def get_grade(course_det):\n return course_det[1]",
"def grade_calculate_grade(self):\n try:\n if int(self.root.ids.grade_input_grade.text) >= 85:\n grade = 'High Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 75:\n grade = 'Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 65:\n grade = 'Credit'\n elif int(self.root.ids.grade_input_grade.text) >= 50:\n grade = 'Pass'\n else:\n grade = 'Fail'\n self.root.ids.grade_output_label.text = 'Grade: ' + grade\n except ValueError:\n\n self.root.ids.grade_output_label.text = 'Invalid Grade'",
"def get_grade(soup):\n\n # up there with with route name\n grade_table = soup.h3\n\n # look for grades in spans\n grade = []\n for s in grade_table.find_all('span'):\n\n # class names are the grading systems\n if s['class'] != None:\n head = s['class'][0]\n head = head.encode('utf8', errors = 'strict')\n\n # grade are showing with text\n body = s.get_text()\n body = body.encode('utf8', errors = 'ignore')\n\n grade.append(body)\n\n # extract tbe grades\n grade_data = {}\n for g in grade:\n h = g.split(SPLIT_CHAR)\n if len(h) > 1:\n grade_data['rate'+h[0].strip()] = h[1]\n\n return grade_data",
"def gfa(self):\n return self.GFA",
"def grade(self):\n if round(self.numAvg,0) >= 70:\n return round(self.numAvg,0)\n elif self.PassSummer:\n return 70\n elif round(self.numAvg,0) >= 55 and not self.PassSummer:\n return round(self.numAvg,0)\n else:\n return 55",
"def grades_to_number(grade):\n # Conditions\n if grade == \"A\":\n return 1\n elif grade == \"B\":\n return 0.5\n elif grade == \"C\":\n return 0\n elif grade == \"D\":\n return -0.5\n else:\n return -1",
"def getGrade(self, difficulty):\n \n grade = skillToGrade(self.skill, difficulty)\n self.grades.append(grade)\n return grade",
"def fGT(self):\n pass",
"def gsrfp(self, gp, lai):\n\t return (lai*self.gtf()*gp/self.F_CAP)/(self.gtf() + lai*gp/self.F_CAP)",
"def ptpresionagua(self,prof_pt): #getter que halla la presion de poros en un punto\r\n p_agua=0.0\r\n if prof_pt<self.n_fret:\r\n p_agua=0.0\r\n pass\r\n else:\r\n p_agua=(prof_pt-self.n_fret)*self.gamma_h20\r\n return p_agua",
"def gpa(self):\n try:\n return sum(self.courses.values()) / len(self.courses)\n except ZeroDivisionError:\n return 0",
"def assignment_grade(id, session_id, course_id):\n\n user_id = session.get('user_id')\n\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT DISTINCT(ROUND(grades.points_received / grades.total_points, 2) * 100) as assignment_grade,\n grades.total_points as total, grades.points_received as earned,\n grades.submission as submission, grades.feedback as feedback,\n grades.student_id, grades.assignment_id as assign_id, assignments.name as assign_name,\n assignments.description as description,\n grades.grade_id, roster.session_id as class_session, courses.name as name\n\t FROM courses JOIN sessions on courses.course_id=sessions.id\n\t JOIN assignments on assignments.session_id=sessions.id\n JOIN grades on grades.assignment_id=assignments.assignment_id\n JOIN roster on roster.session_id=sessions.id\n WHERE grades.assignment_id= %s\n AND grades.student_id= %s\"\"\",\n (id, user_id))\n\n grade = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/assignment_grade.html\", course_id=course_id, session_id=session_id, id=id, grade=grade)",
"def policy(self, grading_label):\n element = self.find_css('#grading_type')[0]\n select = Select(element)\n select.select_by_visible_text(grading_label)\n\n EmptyPromise(\n lambda: self.policy == grading_label,\n \"Grading label is updated.\",\n ).fulfill()",
"def computeGrades(e1, e2, a):\n \n a = assignmentScores\n a.sort()\n i=0\n while i<10:\n sum+=sum a[i]\n avg = sum/10\n \n grade = ((e1 + e2) /2) * 0.4 + (avg) * 0.6\n \n return grade\n \n if grade >= 90 and grade <= 100:\n return(\"A\")\n \n elif grade >= 80 and grade < 90:\n return(\"B\")\n \n elif grade >= 70 and grade < 80:\n return(\"C\")\n \n elif grade >= 60 and grade < 70:\n return(\"D\")\n \n elif grade < 60:\n return(\"F\")",
"def degrees_to_grade(angle_in_degrees):\n if np.any(angle_in_degrees>90): #np.any works if angle_in_degrees is a single value or array\n raise Exception('Can only convert an angle between 0 to 90 degrees to a percentage grade.\\nSee: https://en.wikipedia.org/wiki/Grade_(slope)')\n angle_in_grade = 100*np.tan(angle_in_degrees*np.pi/180)\n return(angle_in_grade)",
"def rads_to_grade(angle_in_radians):\n if np.any(angle_in_radians>0.5*np.pi): #np.any works if angle_in_degrees is a single value or array\n raise Exception('Can only convert an angle between 0 to pi/2 radians to a percentage grade.\\nSee: https://en.wikipedia.org/wiki/Grade_(slope)')\n angle_in_grade = 100*np.tan(angle_in_radians)\n return(angle_in_grade)",
"def design_grna(seq):\n\n transcript = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A'}\n grna = \"\".join(transcript[n] for n in seq)\n\n return grna"
] | [
"0.6791343",
"0.64294994",
"0.610843",
"0.59711015",
"0.593342",
"0.5807375",
"0.57462656",
"0.5738365",
"0.5735842",
"0.5732751",
"0.5731037",
"0.572275",
"0.5679913",
"0.56496114",
"0.5646817",
"0.5641173",
"0.5636095",
"0.5517465",
"0.5508186",
"0.5495881",
"0.5381232",
"0.5353609",
"0.5313167",
"0.5305285",
"0.52749133",
"0.52594036",
"0.5231944",
"0.51888645",
"0.51803577",
"0.5174595"
] | 0.6462246 | 1 |
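A quick usage sketch for the grade_to_gpa document above (editor's illustration, not part of the dataset record). It assumes the function is defined in scope exactly as shown; the expected values follow from its cutoff table, and the try/except branches exercise its ValueError and TypeError paths.

# Assumes grade_to_gpa from the record above is already defined in scope.
assert grade_to_gpa("A-") == 3.7    # accepted letter grades map directly
assert grade_to_gpa(86) == 4.0      # 85-89 -> "A" -> 4.0
assert grade_to_gpa(74) == 3.0      # 73-76 -> "B" -> 3.0
assert grade_to_gpa(40) == 0.0      # anything below 70 -> "FZ" -> 0.0

for bad in ("C+", 101):             # unlisted letter or out-of-range mark
    try:
        grade_to_gpa(bad)
    except ValueError:
        pass                        # both cases raise ValueError

try:
    grade_to_gpa(3.9)               # floats are neither str nor int
except TypeError:
    pass                            # non-str/int input raises TypeError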
Connect a datacenter to this endpoint. An endpoint can only be connected to a single datacenter. | def connect_datacenter(self, dc):
self.compute.dc = dc
for ep in self.openstack_endpoints.values():
ep.manage = self.manage
logging.info \
("Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_dc_network(self, dc_network):\n self.manage.net = dc_network\n self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network\n logging.info(\"Connected DCNetwork to API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))",
"def connect(self, container_name: str, aliases: list[str] = None,\n ipv4: str | None = None) -> None:\n self.log.debug(\n f\"Connecting {container_name} to network '{self.network_name}'\")\n self.network.connect(\n container_name, aliases=aliases, ipv4_address=ipv4\n )",
"def _connect(self):\n if self.cluster.get('encrypted_password'):\n self.cluster['password'] = aws_etl.utils.decrypt(\n self.cluster['encrypted_password'])\n\n self.connection = connect(\n host=self.cluster['host'],\n port=self.cluster['port'],\n sslmode='require',\n user=self.cluster['user'],\n password=self.cluster['password'],\n database=self.cluster['database'])\n return self.connection",
"def ConnectDevice(self, *args, **kwargs): # pylint: disable=invalid-name\n return self",
"def connect(self):\n self.cluster = Cluster([self.db_node])\n try:\n self.session = self.cluster.connect()\n self.session.default_timeout = DEFAULT_TIMEOUT\n except Exception as e:\n raise StorageError(\"Cannot connect to {}\".format(self.db_node), e)",
"def connect(self) -> None:\n self.s.connect((self.ip, self.port))",
"async def connect(self):\n self.logger.info(f'connecting to {self.dsn}')\n await self.dbase.connect()",
"def ConnectDevice(self, *args, **kwargs): # pylint: disable=invalid-name\n raise NotImplementedError",
"def connect(self, endpoint: Endpoint) -> ConnectionId:\n if not self.started:\n raise Exception(f\"Bus {self.busIdentity} is not active\")\n\n endpoint = Endpoint(endpoint)\n\n with self._lock:\n connId = self._newConnectionId()\n self._connIdToOutgoingEndpoint[connId] = endpoint\n self._connIdPendingOutgoingConnection.add(connId)\n\n # TriggerConnect must go on the sendQueue and not the EventQueue\n # in order for the auth_token to be sent (if necessary) before\n # any subsequent sendMessage calls schedule messages on the connection.\n # self._scheduleEvent((connId, TriggerConnect))\n self._putOnSendQueue(connId, TriggerConnect)\n\n return connId",
"def connect_to_vcenter(self, hostname=None, username=None, password=None, certFile=None):\n\n if not certFile:\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\n else:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\n try:\n self.connect = SmartConnect(host=hostname,\n user=username, pwd=password, certFile=certFile)\n msg = \"Successfull connection\"\n\n print(msg)\n return self.connect, msg\n except vim.fault.InvalidLogin as error:\n msg = \"Failed to connect to Vcenter %s using credentials \\\n username: %s and password: %s\" % (hostname, username, password)\n log.error(\"Failed to connect to Vcenter {0} using credentials \\\n username: {1} and password: {2}\".format(hostname, username, password))\n\n return msg\n # raise Exception(msg)\n except Exception as error:\n msg = \"Unable to connect to Vcenter %s because of %s\" % (hostname, error)\n log.error(msg)\n\n # raise Exception(msg)\n return msg\n # return msg",
"def connect(self):\n self.conn.connect()",
"def login(self):\n try:\n self._service_instance = connect.SmartConnect(host=self.address,\n user=self.username,\n pwd=self.password,\n port=self.port,\n sslContext=self.sslContext)\n #connectionPoolTimeout=self.timeout)\n except Exception as err:\n raise err",
"def connect(self):\n if self._connect is None:\n raise MissingFunctionDefinition(\"connect method is not mapped\")\n if not self.connected:\n self._connect()",
"def connect(self, **kw_params):\r\n if self.connection_cls:\r\n return self.connection_cls(region=self, **kw_params)",
"def connectivity_service_end_point(self, connectivity_service_end_point: str):\n\n self._connectivity_service_end_point = connectivity_service_end_point",
"def connect(self, **kwargs):\n raise NotImplementedError",
"def connect(self, username, password=None):\n if not password:\n password = getpass(\"Password for {0}: \".format(self.hostname))\n self.service_instance = connect.SmartConnect(host=self.hostname,\n user=username,\n pwd=password,\n port=443)\n atexit.register(connect.Disconnect, self.service_instance)",
"def device_connect(self):\n pass",
"def connect(\n manager_endpoint=None,\n admin_key=None,\n query_endpoint=None,\n verify_certificate=None,\n query_timeout=None,\n name=None,\n config_file=None):\n from .connect_fn import connect\n return connect(\n manager_endpoint=manager_endpoint,\n admin_key=admin_key,\n query_endpoint=query_endpoint,\n verify_certificate=verify_certificate,\n query_timeout=query_timeout,\n name=name,\n config_file=config_file)",
"def connect(self, **kwargs):\n pass",
"async def connect(self):\n self._conn = await self._loop.run_in_executor(\n None, connector.Connector, self._creds\n )",
"def connect(self, connID, addr):\r\n return self.callRemote('connect', connID, addr)",
"def _connect(self):\n cluster = Cluster('http://{}:{}'.format(self.host, self.port))\n authenticator = PasswordAuthenticator('Administrator', self.password)\n cluster.authenticate(authenticator)\n self.client = cluster.open_bucket(self.bucket)",
"def deploy_dc(self):\n print(\"==> Deploying Data Center\")\n # TODO: Replace Marvin\n mrv = marvin.marvinInit.MarvinInit(self.marvin_config)\n mrv.init()\n dc = marvin.deployDataCenter.DeployDataCenters(mrv.getTestClient(), mrv.getParsedConfig())\n dc.deploy()",
"def ConnectDevice(\n self, *args, **kwargs\n ): # pylint: disable=invalid-name, no-self-use\n raise socket_error",
"def connect(self):\n if not self.is_connected:\n self._init_cec_connection()",
"def connect(self, reconnect=True, *args, **kwargs):\n pass",
"def connect(self, device_ip, device_port=DEFAULT_PORT):\n return",
"async def connect(self, **kwargs) -> bool:\n return await self._backend.connect(**kwargs)",
"def connect(self):\n raise NotImplementedError"
] | [
"0.68350774",
"0.59132737",
"0.5732963",
"0.57322764",
"0.57078254",
"0.5706639",
"0.5680314",
"0.5666111",
"0.5593733",
"0.5567479",
"0.5567184",
"0.55639863",
"0.55574876",
"0.55011344",
"0.5458151",
"0.54542726",
"0.54163355",
"0.540785",
"0.5394772",
"0.53901094",
"0.5384594",
"0.5371194",
"0.53663856",
"0.53632843",
"0.5361721",
"0.53496647",
"0.5342228",
"0.5338957",
"0.53272104",
"0.531593"
] | 0.7805443 | 0 |
Connect the datacenter network to the endpoint. | def connect_dc_network(self, dc_network):
self.manage.net = dc_network
self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network
logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_datacenter(self, dc):\n self.compute.dc = dc\n for ep in self.openstack_endpoints.values():\n ep.manage = self.manage\n logging.info \\\n (\"Connected DC(%s) to API endpoint %s(%s:%d)\" % (dc.label, self.__class__.__name__, self.ip, self.port))",
"def connect(self) -> None:\n self.s.connect((self.ip, self.port))",
"def connect(self):\n self.conn.connect()",
"def connect(self, container_name: str, aliases: list[str] = None,\n ipv4: str | None = None) -> None:\n self.log.debug(\n f\"Connecting {container_name} to network '{self.network_name}'\")\n self.network.connect(\n container_name, aliases=aliases, ipv4_address=ipv4\n )",
"async def connect(self):\n self.logger.info(f'connecting to {self.dsn}')\n await self.dbase.connect()",
"def _connect(self):\n try:\n #print(\"try to connect _connect\")\n sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.remote_address)\n except socket.error as error:\n logger.warning(\"Couldn't connect to %s: %s.\",\n self._repr_remote(), error)\n else:\n self.initialize(sock, self.remote_service_coord)",
"def _connect(self):\n if self.cluster.get('encrypted_password'):\n self.cluster['password'] = aws_etl.utils.decrypt(\n self.cluster['encrypted_password'])\n\n self.connection = connect(\n host=self.cluster['host'],\n port=self.cluster['port'],\n sslmode='require',\n user=self.cluster['user'],\n password=self.cluster['password'],\n database=self.cluster['database'])\n return self.connection",
"def connect(self):\n self.cluster = Cluster([self.db_node])\n try:\n self.session = self.cluster.connect()\n self.session.default_timeout = DEFAULT_TIMEOUT\n except Exception as e:\n raise StorageError(\"Cannot connect to {}\".format(self.db_node), e)",
"async def connect(self):\n await self._perform_connect()\n\n self.logger.debug(\"ewelink Connected\")\n self._publish('client', 'status', \"Connected\")\n self._disconnecting = False\n\n await self._receive_loop()",
"async def connect(self):\n self._conn = await self._loop.run_in_executor(\n None, connector.Connector, self._creds\n )",
"async def connect(self):\n await asyncio.gather(self._exchange_connection.connect_to_server(), self.on_connection())",
"def connect(self) -> None:\n self.terminate()\n self._new_client().connect(\n hostname=self.ip,\n port=self.port,\n username=self.username,\n password=self.password,\n look_for_keys=False,\n allow_agent=False)",
"async def connect(self):\n raise NotImplementedError",
"def connect(self):\n broadcast(\n \"Connect\", \n self.connection, \n self.network\n )\n \n listen(self.address, self.connection, self.message_handler)",
"def connect(self):\n\t\tself._entity_server_connection.attempt_connection()",
"def connect(self,ip,port):\n return self.network.connect(ip,port)",
"async def connect(self):\n try:\n self._cmd_stream = await self._connect()\n self.inc_counter(\"%s.connected\" % self.objname)\n self.logger.info(\"Connected: %s\", self._extra_info)\n except Exception as e:\n self.logger.error(\"Connect Failed %r\", e)\n self.inc_counter(\"%s.failed\" % self.objname)\n raise e",
"def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()",
"def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True",
"def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))",
"def connect(self, device_ip, device_port=DEFAULT_PORT):\n return",
"def connect_to_peer(self):\n pass",
"def device_connect(self):\n pass",
"def connect(self):\n self.net.active(True)\n self.net.config(essid=self.ssid, password=self.pwd, channel=3)\n\n while not self.net.active():\n pass\n\n self.net.ifconfig((\"192.168.4.5\", \"255.255.255.0\", \"192.168.4.1\", \"208.67.222.222\"))",
"def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)",
"def connect(self) -> None:\n ...",
"def login(self):\n try:\n self._service_instance = connect.SmartConnect(host=self.address,\n user=self.username,\n pwd=self.password,\n port=self.port,\n sslContext=self.sslContext)\n #connectionPoolTimeout=self.timeout)\n except Exception as err:\n raise err",
"async def connect(self):\n\n # Display info message\n log.info(\"connect\")\n\n try:\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then Connect using SSH\n await self.connectSSH()\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then Connect using Telnet\n await self.connectTelnet()\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"connect: unsupported protocol: {self._protocol}\")\n\n except Exception:\n\n # There was a problem with a connection method\n\n # Display info message\n log.info(\"connect: connection error\")\n\n raise",
"def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))",
"def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)"
] | [
"0.75026584",
"0.6867479",
"0.64917773",
"0.6375236",
"0.6371488",
"0.62445194",
"0.62019926",
"0.61340374",
"0.6125503",
"0.61206913",
"0.6095818",
"0.60791093",
"0.606961",
"0.6069137",
"0.60621864",
"0.6056231",
"0.6043292",
"0.6030431",
"0.6009846",
"0.6002214",
"0.6001334",
"0.59885085",
"0.5968427",
"0.5957235",
"0.595448",
"0.5915671",
"0.59120077",
"0.5906416",
"0.58909583",
"0.5888183"
] | 0.75623393 | 0 |
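The connect_datacenter and connect_dc_network documents above come from an OpenStack-emulator API endpoint. The sketch below uses minimal hypothetical stand-in classes (not the emulator's real ones) to illustrate the wiring they perform: attach a single DC to the compute model, attach the DC network to the management handle, and register the floating network in the compute net table under its id.

import logging

class Net:
    def __init__(self, net_id):
        self.id = net_id

class Compute:
    def __init__(self):
        self.dc = None
        self.nets = {}

class Manage:
    def __init__(self):
        self.net = None
        self.floating_network = Net("floating-net-0")

class MiniEndpoint:
    def __init__(self, ip="0.0.0.0", port=5000):
        self.ip, self.port = ip, port
        self.compute = Compute()
        self.manage = Manage()

    def connect_datacenter(self, dc):
        self.compute.dc = dc                       # an endpoint holds exactly one DC
        logging.info("Connected DC(%s) to %s:%d", dc, self.ip, self.port)

    def connect_dc_network(self, dc_network):
        self.manage.net = dc_network
        floating = self.manage.floating_network    # expose it in the compute net table
        self.compute.nets[floating.id] = floating
        logging.info("Connected DCNetwork to %s:%d", self.ip, self.port)

ep = MiniEndpoint()
ep.connect_datacenter("dc1")
ep.connect_dc_network("dc-net")
assert ep.compute.dc == "dc1" and "floating-net-0" in ep.compute.nets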
Start all connected OpenStack endpoints that are connected to this API endpoint. | def start(self, wait_for_port=False):
for c in self.openstack_endpoints.values():
c.compute = self.compute
c.manage = self.manage
c.server_thread = threading.Thread(target=c._start_flask, args=())
c.server_thread.daemon = True
c.server_thread.name = c.__class__.__name__
c.server_thread.start()
if wait_for_port:
self._wait_for_port(c.ip, c.port) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def start_all(self):\n try:\n for service in self.services:\n try:\n await service.start()\n await service.healthcheck()\n except Exception as e:\n log.exception(\"Exception while starting %s service\", service)\n raise ServiceStartupException from e\n self.started_services.append(service)\n except ServiceStartupException:\n log.error(\"Stopping services on startup failure\")\n await self.stop_all()\n raise",
"def start(self):\r\n for srv in self._servers:\r\n srv.start()",
"async def connections_endpoints(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n connection_id = request.match_info[\"conn_id\"]\n\n profile = context.profile\n connection_mgr = ConnectionManager(profile)\n try:\n endpoints = await connection_mgr.get_endpoints(connection_id)\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n except (BaseModelError, StorageError, WalletError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n return web.json_response(dict(zip((\"my_endpoint\", \"their_endpoint\"), endpoints)))",
"def serviceConnects(self):\n #log.debug(f\"{self.name}: servicing new connections for.\")\n for ca, ix in list(self.server.ixes.items()):\n if ix.cutoff:\n self.closeConnection(ca)\n continue\n\n if ca not in self.connections:\n log.debug(f\"Adding new connection for {ix}.\")\n self.connections[ca] = Requester(self.dhtdoer, remoter=ix, name=ca)\n\n if ix.timeout > 0.0 and ix.tymer.expired:\n self.closeConnection(ca)",
"def start_peers(self):\n for i in self.nodes:\n i.start()",
"async def _track_and_propagate_available_endpoints(self) -> None:\n async for ev in self._endpoint.stream(EventBusConnected):\n self._available_endpoints = self._available_endpoints + (ev.connection_config,)\n self.logger.debug(\"New EventBus Endpoint connected %s\", ev.connection_config.name)\n # Broadcast available endpoints to all connected endpoints, giving them\n # a chance to cross connect\n await self._endpoint.broadcast(AvailableEndpointsUpdated(self._available_endpoints))\n self.logger.debug(\"Connected EventBus Endpoints %s\", self._available_endpoints)",
"def start_all(self):\n for proc in self.get_all():\n proc.start()",
"def start_all_nodes(self):\n for node in self.nodes:\n node.start()",
"def shutdown_all_endpoints(self):\n logger.debug('Removing all endpoints')\n endpoints = []\n with self._endpoint_lock:\n endpoints = list(self._endpoints)\n # be sure we're not holding the lock when shutdown calls\n # _remove_endpoint.\n for e in endpoints:\n e.shutdown()",
"def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()",
"def endpoints(self):\n\n # Yields the home page\n gui_uri = self.app.config.GUI_PX\n yield self.page(MiyagiAppHome, gui_uri)\n\n # Yields the process list page\n processes_uri = f'{gui_uri}{self.app.config.PROCESSES_PX}'\n yield self.page(ProcessesPage, processes_uri)\n\n for p_name, process in self.app.processes.items():\n # For every process yields the relative general page\n process_uri = f'{processes_uri}/{p_name}'\n yield self.page(\n ProcessPage,\n process_uri,\n process=process\n )\n for obj in process.objects:\n # For every object in the process yields the relative page\n # List of instances + general object actions\n object_uri = f'{process_uri}{self.app.config.OBJECTS_PX}/{obj.name.lower()}'\n yield self.page(\n ObjectPage,\n object_uri,\n handler='generic_handler',\n methods=['GET', ],\n process=process,\n obj=obj\n )\n\n # For every object in the process yields the object creation form\n yield self.page(\n ObjectEditPage,\n f'{object_uri}/<uid>',\n handler='create_modify_object_handler',\n methods=['GET', 'POST'],\n process=process,\n obj=obj\n )\n # TODO: object remove endpoint\n\n # TODO: object actions endpoints\n # Object class methods\n\n # TODO: process actions endopoints\n\n # TODO: System endpoints and controllers",
"def test_access_all_data_all_endpoints(self):\n\n # Some end points just can't be fetched so we have to ignore them.\n end_point_exceptions = [\n \"/api/help/\",\n \"/api/test_host/\",\n \"/api/system_status/\",\n \"/api/updates_available/\",\n \"/api/session/\",\n \"/api/action/\",\n \"/api/run_stratagem/\",\n \"/api/stratagem_configuration/\",\n ]\n\n end_points = self.get_json_by_uri(\"/api/\", args={\"limit\": 0})\n\n for end_point in end_points.values():\n if end_point[\"list_endpoint\"] not in end_point_exceptions:\n import sys\n\n sys.stderr.write(\"\\nReading endpoint %s\\n\" % end_point[\"list_endpoint\"])\n self.get_json_by_uri(end_point[\"list_endpoint\"], args={\"limit\": 0})\n sys.stderr.write(\"\\nRead endpoint %s\\n\" % end_point[\"list_endpoint\"])",
"def run(self):\n log.info(\"{}: Starting {} clients\".format(self.container_id, len(self.clients)))\n self._create_connection()\n for client in self.clients:\n client.open(connection=self.connection)\n return self",
"def _start(self, arbiter):\n self.transport_manager.start()\n for execution_manager in self.execution_managers:\n execution_manager.start()",
"def run(self):\n print(\"[CONNEXION_MANAGER] start connecting\")\n while True:\n self.connexion_init()",
"def start(self):\n for task in self._tasks:\n task.start()",
"def _attach_endpoints(self):\n for name, endpoint in inspect.getmembers(self):\n is_class = inspect.isclass(endpoint)\n is_subclass = is_class and issubclass(endpoint, self.Endpoint)\n not_endpoint = endpoint is not self.Endpoint\n\n if is_subclass and not_endpoint:\n endpoint_instance = endpoint(self.session)\n setattr(self, name.lower(), endpoint_instance)",
"def run(self):\n self._list_servers()",
"def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()",
"async def establish_hosts(self):\n scheme = self._config['scheme']\n hosts = self._config['hosts']\n port = self._config['port']\n for hostname in hosts:\n url = '{}://{}:{}/gremlin'.format(scheme, hostname, port)\n host = await driver.GremlinServer.open(\n url, self._loop, **dict(self._config))\n self._hosts.append(host)\n self._hostmap[hostname] = host",
"def start_servers(self, **kwargs):\n self.cleanup()\n\n # Start up the API and default conductor server\n\n # We start the conductor server first, as the API server config\n # depends on the conductor port - this ordering allows for\n # retrying the launch on a port clash\n self.start_with_retry(self.conductor_server, 'conductor_port', 3,\n **kwargs)\n kwargs['conductor_port'] = self.conductor_server.bind_port\n\n self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)",
"def endpoints(self, endpoints):\n\n self._endpoints = endpoints",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))",
"def endpoint_list(self):\n _, body = self.request('/v1.1/endpoints', 'GET')\n return body",
"def start(self):\n for _id in self._workers:\n self.start_action(_id)",
"def addEndpoints(self, endpoints):\n self.endpoints.extend(endpoints)\n self._connectOrBind(endpoints)",
"async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)",
"def _construct_endpoints(self):\n # Functions\n async def get_function_list_data(request: web.Request):\n entrypoints = [elm.to_dict() for elm in self._function_manager.definitions.values()]\n return web.json_response(entrypoints)\n\n async def get_function_list_text(request: web.Request):\n rows = []\n for definition in self._function_manager.definitions.values():\n rows.append(definition.function_name)\n rows.append(' URL:')\n rows.append(f' async api: /{definition.function_name}')\n rows.append(f' block api: /{definition.function_name}/keep-connection')\n rows.append(f' Max Concurrency: {definition.max_concurrency}')\n rows.append(' Description:')\n rows.append(f' {definition.description}')\n if len(definition.arg_definitions) == 0:\n rows.append(' No Args')\n else:\n rows.append(' Args')\n for arg in definition.arg_definitions:\n rows.append(f' {arg.name} {arg.type.name} {\"Requiered\" if arg.is_required else \"NOT-Required\"}')\n if arg.description != '':\n rows.append(f' {arg.description}')\n rows.append(f' Timeout: {definition.timeout} sec')\n rows.append('\\n')\n\n return web.Response(text='\\n'.join(rows))\n\n # function\n async def get_function_definition(request: web.Request):\n function_name = request.match_info['function_name']\n\n if function_name not in self._function_manager.definitions:\n raise web.HTTPNotFound()\n\n return web.json_response(self._function_manager.definitions[function_name].to_dict())\n\n async def get_function_running_count(request: web.Request):\n function_name = request.match_info['function_name']\n\n ret = self._function_manager.get_current_number_of_execution(function_name)\n if ret is None:\n raise web.HTTPNotFound()\n\n return web.json_response(ret)\n\n # Tasks\n async def get_task_info(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.to_dict())\n\n async def get_task_done(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.is_done())\n\n async def get_task_result(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n return web.json_response(task_info.result)\n\n async def get_task_list(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n tasks = self._function_manager.list_task_info(function_name)\n if tasks is None:\n raise web.HTTPNotFound()\n\n return web.json_response([elm.to_dict() for elm in tasks])\n\n # Termination\n async def post_terminate_function(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n self._function_manager.terminate_function(function_name)\n return web.json_response({})\n\n async def post_terminate_task(request: web.Request, task_id: str):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = 
request.match_info['task_id']\n self._function_manager.terminate_task(task_id)\n\n return web.json_response({})\n\n api_list = [\n web.get('/function/list/data', get_function_list_data),\n web.get('/function/list/text', get_function_list_text),\n web.get(r'/function/definition/{function_name}', get_function_definition),\n web.get(r'/function/running-count/{function_name}', get_function_running_count),\n web.get(r'/task/info/{task_id}', get_task_info),\n web.get(r'/task/done/{task_id}', get_task_done),\n web.get(r'/task/result/{task_id}', get_task_result),\n web.get(r'/task/list/{function_name}', get_task_list),\n web.post(r'/terminate/function/{function_name}', post_terminate_function),\n web.post(r'/terminate/task/{task_id}', post_terminate_task),\n ]\n\n async def index(request: web.Request):\n return web.Response(text='\\n'.join([elm.path for elm in api_list])+'\\n')\n\n self._app.add_routes([*api_list, web.get('/', index)])",
"def api_endpoints(self):\n endpoints = [\n furl(path=('index', entity_type), args={'size': '1'})\n for entity_type in self.entity_types\n ]\n with ThreadPoolExecutor(len(endpoints)) as tpe:\n status = dict(tpe.map(self._api_endpoint, endpoints))\n status['up'] = all(v['up'] for v in status.values())\n return status",
"def connect_all(self, service=VoidService, config={}):\n return [s.connect(service, config) for s in self.servers]"
] | [
"0.6081534",
"0.60684043",
"0.59456795",
"0.5857773",
"0.58120763",
"0.5776767",
"0.57513654",
"0.56928366",
"0.5676735",
"0.56644404",
"0.563852",
"0.5633027",
"0.5626252",
"0.5604621",
"0.5600612",
"0.559889",
"0.5575616",
"0.5568452",
"0.55555224",
"0.5530493",
"0.55136335",
"0.55102384",
"0.55001825",
"0.5495843",
"0.54927385",
"0.54910356",
"0.54780525",
"0.54657394",
"0.545106",
"0.54333013"
] | 0.6768158 | 0 |
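The start() document above optionally blocks on self._wait_for_port(c.ip, c.port), a helper whose body is not part of the record. A minimal sketch of what such a helper typically does (hypothetical, standard library only): poll a TCP connect until the endpoint's Flask thread is actually accepting connections, or give up after a timeout.

import socket
import time

def wait_for_port(ip, port, timeout=10.0, interval=0.1):
    # Returns True once something accepts a TCP connection on (ip, port),
    # False if nothing is listening after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((ip, port), timeout=interval):
                return True
        except OSError:
            time.sleep(interval)
    return False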
Stop all connected OpenStack endpoints that are connected to this API endpoint. | def stop(self):
for c in self.openstack_endpoints.values():
c.stop()
#for c in self.openstack_endpoints.values():
# if c.server_thread:
# print("Waiting for WSGIServers to be stopped ...")
# c.server_thread.join() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shutdown_all_endpoints(self):\n logger.debug('Removing all endpoints')\n endpoints = []\n with self._endpoint_lock:\n endpoints = list(self._endpoints)\n # be sure we're not holding the lock when shutdown calls\n # _remove_endpoint.\n for e in endpoints:\n e.shutdown()",
"def _stopping(self, sender, **kwargs):\n for v in self._platform_connections.values():\n v.kill()\n\n self._platform_connections.clear()\n\n self.vip.rpc.call(MASTER_WEB, 'unregister_all_agent_routes',\n self.core.identity).get(timeout=30)",
"def terminate(self):\r\n for call in self._deathCandidates.itervalues():\r\n call.cancel()\r\n\r\n self._deathCandidates = {}\r\n\r\n for connection in self._connections.copy():\r\n connection.destroy()\r\n assert len(self._connections) == 0\r\n\r\n Endpoint.terminate(self)",
"def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()",
"def stop(self):\r\n for srv in self._servers:\r\n srv.stop()",
"def stop(self):\n for service_id in self.keys():\n self[service_id].stop()\n del self[service_id]\n\n self._stopped = True",
"def stop(self):\n for fd, sock in six.iteritems(self._sockets):\n self.io_loop.remove_handler(fd)\n sock.close()",
"async def _stop_nested_services(self):\n await self._services.stop_all()",
"def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number",
"def cleanUp(self):\r\n for endpoint in self._endpoints.keys():\r\n endpoint.destroy()\r\n\r\n assert len(self._endpoints) == 0",
"def stop(self):\n with self.active_lock:\n self.active = False\n if self.thread:\n self.thread.join()\n for conn in self.connections:\n conn.close()\n for srv_sock in self.srv_socks:\n srv_sock.close()\n for client_sock in self.client_socks:\n client_sock.close()\n self.client_socks = []\n self.srv_socks = []\n self.connections = []\n self.scenario = None",
"def shutdown(self):\n asyncio.cancel(self._server_coro)\n for hid, coro in self.conns.items():\n asyncio.cancel(coro)",
"def stop(self):\n logger.info(\"Shutting down EndpointInterchange\")\n\n # TODO: shut down executors gracefully\n\n # kill_event must be set before quiesce_event because we need to guarantee that once\n # the quiesce is complete, the interchange will not try to start again\n self._kill_event.set()\n self._quiesce_event.set()",
"def stop(self):\n self.api.stop()",
"def stop(self):\n logging.debug(\"footprint/stop entered\")\n logging.info(\"Stopping cloud instances\")\n print \"Stopping machines\"\n for machine in self.machines:\n logging.debug(\"stopping %s\" % machine)\n server = self.machines[machine]\n server.stop()\n \n # monitor until all the machines are down\n active_machines = 1\n while active_machines:\n running = 0\n active_machines = 0\n for machine in self.machines:\n server = self.machines[machine]\n try:\n tmp = cs.servers.get(self.machines[machine].id)\n active_machines = 1\n running = running + 1 \n except novaclient.exceptions.NotFound:\n continue\n # if running == 0:\n # break\n time.sleep(10)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n logging.info(\"Stopping Networks\")\n print\n print \"Stopping networks\"\n \n for network in self.networks:\n logging.debug(\"stopping %s\" % str(network))\n n = self.networks[network]\n n.stop()\n \n while True:\n running = 0\n # print self.networks\n for network in self.networks:\n n = self.networks[network]\n\n try:\n tmp = cn.find(id=n.id)\n running = running + 1\n except pyrax.exceptions.NotFound:\n continue\n if running == 0:\n break\n time.sleep(1)\n sys.stdout.write(\".\")\n sys.stdout.flush()",
"def stop(self):\n for task in self._tasks:\n task.stop()",
"def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass",
"def disconnect(self):\n for connection in six.itervalues(self.hosts):\n connection.disconnect()",
"def disconnect(self):\n for conn in self.all_connections():\n conn.disconnect()",
"def terminate_all(self):\n self._stop_all('terminate')",
"def killconnections(self):\n for conn in self._connections:\n try:conn.close()\n except:pass\n self._connections=[]",
"def stop(self):\n if not self:\n return\n\n self._disconnect_clients()\n self._transport.close()\n self._stopped = True",
"def destroy(self):\r\n self._endpoint.unregisterInterface(self)\r\n self._endpoint = None\r\n\r\n self._namespace.unregisterInterface(self)\r\n self._namespace = None\r\n\r\n # Endpoint should destroy all connections\r\n assert len(self._connections) == 0\r\n\r\n super(Interface, self).destroy()",
"def stop(self, force=False):\n self.logger.info('Closing all open connections...')\n opened_address_text = ', '.join(\n (address_to_str(k.local_address) for k in self._server_list)\n ) or 'None'\n self.logger.debug('Listening tunnels: ' + opened_address_text)\n self._stop_transport(force=force)\n self._server_list = [] # reset server list\n self.tunnel_is_up = {} # reset tunnel status",
"def disconnect(self):\n for connection in six.itervalues(self):\n connection.disconnect()",
"def kill_all(self) -> None:\n for i in self.ist:\n i.stop_stream()\n i.close()\n for o in self.ost:\n o.stop_stream()\n o.close()",
"def stop_all_nodes(self):\n for node in self.nodes:\n if node.running():\n node.stop()",
"def remote_destroy(self):\r\n for interface in self._interfaces.values():\r\n interface.remote_destroy()\r\n\r\n assert len(self._interfaces) == 0\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterNamespace(self)\r\n self._endpoint = None",
"async def stop(self):\n # don't process scheduler anymore\n self._scheduler.suspend = True\n\n # process async stop tasks\n await asyncio.wait([\n self._api.stop(),\n self._dns.stop(),\n self._websession.close(),\n self._websession_ssl.close()\n ], loop=self._loop)",
"def stop(self):\n for module in self.asynchronous:\n module.stop()"
] | [
"0.7218453",
"0.71554255",
"0.6913532",
"0.6757825",
"0.6647372",
"0.6572691",
"0.6498089",
"0.64354163",
"0.6427609",
"0.6418356",
"0.63848424",
"0.63806695",
"0.635318",
"0.63259894",
"0.6320032",
"0.63064104",
"0.62986004",
"0.6274114",
"0.622263",
"0.6214541",
"0.6208437",
"0.62052536",
"0.62039346",
"0.6197754",
"0.6195811",
"0.61897385",
"0.6186174",
"0.61859304",
"0.61547923",
"0.6120715"
] | 0.7770021 | 0 |
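The stop() document above leaves its thread-join logic commented out. A hedged sketch of that teardown idea (editor's illustration, not the emulator's code): stop every endpoint first, then join each daemon server thread with a timeout so one stuck WSGI server cannot hang shutdown.

def stop_and_join(openstack_endpoints, join_timeout=5.0):
    # Mirror the loop in the document: ask every endpoint to stop.
    for ep in openstack_endpoints.values():
        ep.stop()
    # Then wait (bounded) for each server thread, as the commented-out block hints.
    for ep in openstack_endpoints.values():
        thread = getattr(ep, "server_thread", None)
        if thread is not None and thread.is_alive():
            thread.join(timeout=join_timeout)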
Download and generate Alexia top 1 million url lists | def get_alexia_urls():
#download top 1 million site urls
zip_top_urls = requests.get(ALEXIA_URL)
response_buf = StringIO.StringIO(zip_top_urls.content)
# unzip contents
zfile = zipfile.ZipFile(response_buf)
buf = StringIO.StringIO(zfile.read('top-1m.csv'))
for line in buf.readlines():
(rank,domain) = line.split(',')
yield (int(rank),domain.strip()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_50(url):\n\n results = requests.get(url,headers = headers).json()\n return results",
"def main(url):\n \n words = fetch_words(url)\n print_items(words)",
"def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )",
"def getNBMGenerator():\n limit = 100\n basesearchurl = u'http://nbm-asp.adlibhosting.com/wwwopacx/wwwopac.ashx?database=collect&search=object_name=schilderij&output=json&limit=%s&startfrom=%s'\n baseitemurl = u'http://nbm-asp.adlibhosting.com/wwwopacx/wwwopac.ashx?database=collect&search=priref=%s&output=json'\n baseurl = u'http://collectie.hetnoordbrabantsmuseum.nl/Details/collect/%s'\n\n for i in range(0,11):\n searchurl = basesearchurl % (limit, limit * i,)\n searchPage = requests.get(searchurl)\n searchJson = searchPage.json()\n\n for searchrecord in searchJson.get('adlibJSON').get('recordList').get('record'):\n metadata = {}\n priref = searchrecord.get('@attributes').get('priref')\n itemurl = baseitemurl % (priref,)\n url = baseurl % (priref,)\n\n metadata['url'] = url\n\n itempage = requests.get(itemurl)\n itemjson = itempage.json()\n record = itemjson.get('adlibJSON').get('recordList').get('record')[0]\n\n metadata['collectionqid'] = u'Q12013217'\n metadata['collectionshort'] = u'NBM'\n metadata['locationqid'] = u'Q12013217'\n\n #No need to check, I'm actually searching for paintings.\n metadata['instanceofqid'] = u'Q3305213'\n\n # Get the ID. This needs to burn if it's not available\n metadata['id'] = record['object_number'][0]\n metadata['idpid'] = u'P217'\n\n if record.get('Title'):\n metadata['title'] = { u'nl' : record.get('Title')[0].get('title')[0].get('value')[0],\n }\n\n if record.get('Production') and record.get('Production')[0].get('creator')[0]:\n name = record.get('Production')[0].get('creator')[0].get('value')[0]\n if u',' in name:\n (surname, sep, firstname) = name.partition(u',')\n name = u'%s %s' % (firstname.strip(), surname.strip(),)\n metadata['creatorname'] = name\n\n metadata['description'] = { u'nl' : u'%s van %s' % (u'schilderij', metadata.get('creatorname'),),\n u'en' : u'%s by %s' % (u'painting', metadata.get('creatorname'),),\n }\n else:\n metadata['creatorname'] = u'anonymous'\n metadata['description'] = { u'nl' : u'schilderij van anonieme schilder',\n u'en' : u'painting by anonymous painter',\n }\n metadata['creatorqid'] = u'Q4233718'\n\n # Dimensions are available!\n # Material is available\n\n # Set the inception only if start and end is the same\n if record.get('Production_date') and \\\n record.get('Production_date')[0].get('production.date.start') and \\\n record.get('Production_date')[0].get('production.date.end'):\n proddate = record.get('Production_date')[0].get('production.date.start')[0]\n if proddate == record.get('Production_date')[0].get('production.date.end')[0]:\n metadata['inception']=proddate\n\n yield metadata\n\n return",
"def get_top_1000_movie_links():\n movie_links = []\n for start in range (1, 1000, 50):\n imdb_movie_list_page = requests.get(f'https://www.imdb.com/search/title/?groups=top_1000&view=simple&sort=user_rating,desc&start={start}')\n soup = BeautifulSoup(imdb_movie_list_page.text, 'html.parser')\n\n movie_list_div = soup.find('div', attrs={'class': 'lister list detail sub-list'})\n movie_item_spans = movie_list_div.find_all('span', attrs={'class':'lister-item-header'})\n links = [item.find('a').attrs['href'] for item in movie_item_spans]\n\n movie_links += links\n\n return [f'https://www.imdb.com{l}fullcredits/' for l in movie_links]",
"def main(url):\n words = fetch_words(url)\n print_items(words)",
"def analyze(url):\n\n #Note : Using the function to count repeated words and sorted by value\n\n print('\\n\\nVisiting',url)\n print('The most 25 common word')\n print('\\n{:30} {:6}\\n'.format('Word','Count'))\n\n content = urlopen(url).read().decode()\n collector = Collector(url)\n collector.feed(content)\n urls = collector.getLinks()\n\n words_lst = collector.getdata()\n print(words_lst)\n # word_count = Counter(words_lst) # use collection\n # most_25_common = word_count.most_common(25) #\n\n word_count = frequency(words_lst)\n sorted_word_count = sorted(word_count.items(), key = lambda x : x[1],reverse= True)\n\n for word,count in sorted_word_count[:25]:\n print ('{:30}{:5}'.format(word,count))\n\n #return word_count\n\n # for word,count in most_25_common:\n # print('{:30} {:5}'.format(word,count))\n # return urls",
"def main(url):\n words = fetch_words(url)\n\n print_items(words)",
"def divide_url_all():\n\tf = open(\"url_all.txt\", \"r+\")\n\turl_amount = 0\n\tfile_num = 1\n\tline = f.readline()\n\tsub_f = open(\"url_\"+str(file_num)+\".txt\", \"w+\")\n\twhile(line != \"\"):\n\t\t#print (\"line : \" + line )\n\t\turl_amount += 1\n\t\tsub_f.write(line)\n\t\tif url_amount > 33999:\n\t\t\tsub_f.close()\n\t\t\turl_amount = 0\n\t\t\tfile_num += 1\n\t\t\tsub_f = open(\"url_\"+str(file_num)+\".txt\", \"w+\")\n\t\tline = f.readline()\n\tsub_f.close()\n\treturn file_num",
"def ordered_crawling():\n queue.append(seed_url)\n visited.add(seed_url)\n while len(queue) >= 0:\n try:\n text = req_obj.get_html_text(queue[0])\n print queue[0]\n if text is None:\n raise requests.RequestException()\n add_links_to_queue(text, queue[0])\n # summary generated using summarizer1\n sum_obj.create_and_index_summary(\n req_obj.get_base_url(), text)\n\n # summary generated using summarizer2\n sum_obj2.create_and_index_summary(\n req_obj.get_base_url(), text)\n on_pg_sum.index_on_page_summary(text, queue[0])\n\n result_file.write(str(queue[0]) + \", \" + str(link_weights[queue[0]]))\n er_file.write(\"###########\" + str(link_weights) + \"\\n\\n\\n\\n\")\n update_weights(text)\n queue.sort(compare)\n result_file.write(\"\\n\")\n except requests.RequestException as trace:\n print str(trace) + '\\n'\n er_file.write(queue[0] + '\\n')\n er_file.write(str(trace) + '\\n\\n')\n del link_weights[queue[0]]\n queue.pop(0)",
"def main():\n proxy = get_random_proxy()\n html = crawl(target_url)\n company_all_url = html.xpath('//*[@id=\"quotesearch\"]/ul/li/a/@href')\n code=['none']*len(company_all_url)\n for i in range(len(company_all_url)):\n s = str(str(company_all_url[i]))\n code[i]=s[(len(s) - 13):(len(s) - 5)]\n save_to_neo4j(code,0,len(code))",
"def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t\tsys.exit()\t\n\t\ttext = html2text(response.read())\n\t\tdb.generate_index(text,url)",
"def download_publications(pmids_l):\n stepsize = 50\n all_data = []\n\n for i in range(0, len(pmids_l), stepsize):\n subset = pmids_l[i:i + stepsize]\n pmids = \"\"\n for id in subset[:-1]:\n pmids += id + ','\n pmids += subset[-1]\n\n response = req.urlopen(URL_DOWNLOAD.format(pmids)).read().decode('utf-8')\n response = json.loads(response)\n all_data.extend(deepcopy(response))\n\n return all_data",
"def large_train_collection(train_items: List[JSONDict]) -> TrainCollection:\n items = []\n\n item = train_items[0]\n for i in range(3000):\n copy = item.copy()\n copy[\"url\"] = copy[\"url\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = TrainCollection(items=items)\n return collection",
"def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')",
"def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)",
"def download(word, n_images=100):\n\n # Fields for pixbay from https://pixabay.com/api/docs/#api_search_images\n\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n\n for i in range(5):\n fields = {\n \"key\": _(s.__secret__, egg_open()),\n \"q\": word,\n \"image_type\": \"photo\",\n \"safesearch\": \"true\",\n \"per_page\": max(3, min(200, n_images + i))\n }\n\n debug_log(f\"fields for request:\\n{ {key: fields[key] for key in fields.keys() if key != 'key'} }\")\n\n r = http.request(method='GET',\n url='https://pixabay.com/api/',\n fields=fields)\n\n debug_log(f\"Response data: {r.data}\")\n\n if \"ERROR\" in str(r.data, 'utf-8'):\n continue\n else:\n break\n\n try:\n data = json.loads(r.data.decode('utf-8'))\n except json.decoder.JSONDecodeError as e:\n warnings.warn(\"Cannot download '{word}'. Bad response: {response}\".format(\n word=word,\n response=str(r.data, 'utf-8')\n ))\n return False\n\n image_urls = [item[\"largeImageURL\"] for item in data[\"hits\"]]\n image_ids = [item[\"id\"] for item in data[\"hits\"]]\n\n\n debug_log(f\"Image urls: {image_urls}\")\n debug_log(f\"Len Image urls: {len(image_urls)}\")\n\n save_dir = os.path.join(s.__STEP_1_CACHE_DIR__, word)\n os.makedirs(save_dir, exist_ok=True)\n\n if len(image_urls) < n_images:\n warnings.warn(\"Not enough images for {word}. Only {len_image_urls} instead of {n_images}.\".format(\n word=word,\n len_image_urls=len(image_urls),\n n_images=n_images\n ))\n open(os.path.join(save_dir, \"SATURATED\"), 'w').close()\n open(os.path.join(save_dir, \"DO_NOT_DELETE\"), 'w').close()\n\n image_paths = [get_unique_save_path_name(save_dir,\n im_id,\n im_url.split('.')[-1]) # Get the right image extension\n for im_id, im_url in zip(image_ids, image_urls)]\n\n debug_log(f\"Image paths: {image_paths}\")\n\n for i, im_url, im_path in zip(range(len(image_urls)), image_urls, image_paths):\n debug_log(f\"Downloading '{word}' image [{i+1}/{len(image_urls)}]: {im_url}\")\n save_file(im_url, im_path, http)\n debug_log(f\"Done! Saved as {im_path}\")\n\n return True",
"def get_urls_from_database():\n return select(u for u in Url if u.date_scanned is None).order_by(desc(Url.priority_scan))[:8]",
"def run_scrapping():\n date = datetime.now().strftime(\"%Y-%m-%d\")\n size = 100\n r = list(range(size))\n random.shuffle(r)\n for i in r:\n scrap_page(url_page.format(i), date)\n print(str(i) + \" / \" + str(size))",
"def sina_weibo_emotion4(root):\n start = time.time()\n task_path = assert_dirs(root, 'chinese_reviews_sina_weibo_emotion4')\n url_json = 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4.json'\n url_txt = ['https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4_01.txt',\n 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4_02.txt',\n 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4_03.txt',]\n rq.json(url_json, path_join(task_path, 'chinese_reviews_sina_weibo_emotion4.json'))\n data = pd.DataFrame()\n for url in url_txt:\n s = requests.get(url).content\n data = pd.concat([data, pd.read_csv(io.StringIO(s.decode('utf-8')))])\n data.to_csv(path_join(task_path, 'chinese_reviews_sina_weibo_emotion4.txt'), index=False)\n print('chinese_reviews_sina_weibo_emotion4 dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))\n return task_path",
"def get_paged_request(url):\r\n results = []\r\n while url:\r\n print(\"fetching %s\" % url, file=sys.stderr)\r\n f = urlopen(url)\r\n results.extend(json.load(f))\r\n links = parse_link_header(f.headers)\r\n url = links.get('next')\r\n return results",
"def main():\n\t\tn = 0 \n\t\tfor page in range(pages):\n\t\t\t\tpageNumber = str(page + 1)\n\t\t\t\tprint \"Processing page number \" + pageNumber\n\t\t\t\tpageUrl = 'https://api.github.com/users/' + USER + '/gists?page=' + pageNumber + '&per_page=' + str(int(perpage))\n\t\t\t\tu = urlopen (pageUrl)\n\t\t\t\tgists = json.load(u)\n\t\t\t\t\t\t \n\t\t\t\tfor gist in gists:\n\t\t\t\t\t\tn += 1\n\t\t\t\t\t\tprint \"==== %d ====\" % n\n\t\t\t\t\t\t# print gist.keys()\n\t\t\t\t\t\tgistd = gist['id']\n\t\t\t\t\t\tgisturl = gist['html_url']\n\t\t\t\t\t\tgistdesc = gist['description'] or gistd\n\t\t\t\t\t\tgistfiles = gist['files']\n\t\t\t\t\t\tprint \"gistd: \", gistd\n\t\t\t\t\t\tprint \"gisturl: \", gisturl\n\t\t\t\t\t\tprint \"gistdesc: \", gistdesc\n\t\t\t\t\t\tprint \"gistfiles: \", len(gistfiles)\n\t\t\t\t\t\tfor f in gistfiles:\n\t\t\t\t\t\t\t\tfileurl = gistfiles[f]['raw_url']\n\t\t\t\t\t\t\t\t_filetype = gistfiles[f]['language']\n\t\t\t\t\t\t\t\tif _filetype in ALLOWED_FILE_TYPES:\n\t\t\t\t\t\t\t\t\t\tfiletype = _filetype\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfiletype = \"None\"\n\t\t\t\t\t\t\t\tprint \"fileurl: \", fileurl \n\t\t\t\t\t\t\t\tprint \"filetype: \", filetype, \"(found='%s')\" % _filetype \n\t\t\t\t\t \n\t\t\t\t\t\t\t\tif TESTING:\n\t\t\t\t\t\t\t\t\t\t# testing\n\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\t\t\tprint e\n\t\t\t\t\t\t\t\t\t\t\t\tprint \"*** ERROR WRITING TO sqlite3 ***\"\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\n\t\t\t\tif TESTING:\n\t\t\t\t\t\t# so to avoid calling github API too much...\n\t\t\t\t\t\tbreak",
"def getAllListPage():\n firstPage = city + '/line1'\n data = urlopen(firstPage).read().decode('gbk')\n urlList = getLineTypeList(data)\n urlList.append(firstPage)\n num = len(urlList)\n i = 0\n p = Pool(processes=4)\n pageData = p.map(readData, urlList)\n# manager = Manager()\n# pageData = manager.list()\n# while i < num:\n# procline = Process(target=readData, args=(urlList[i], pageData,))\n# procline.start()\n# procline.join()\n# i += 1\n return pageData",
"def bitbucket_paginate(session, url):\n result = []\n while url:\n r = session.get(url)\n result.extend([r.json()])\n next_url = r.json().get('next')\n if next_url:\n url = next_url\n else:\n url = None\n return result",
"def gather_all_profiles(year, month):\n page = 1\n urls = []\n\n print(\"{}-{} : Begin indexing.\".format(year, month))\n\n while (page > 0):\n urlstring = \"http://scamdigger.com/{}/{}/page/{}\".format(year,month,page) \n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(urlstring)\n urls += enumerate_profiles(urlhandle, page)\n # time.sleep(1+jitter)\n page += 1\n except:\n page = 0\n\n print(\"{}-{} : {} profiles\".format(year,month,len(urls)))\n\n for url in urls:\n uid = url[30:-1]\n outfile=PROFILES+os.sep+uid+'.json'\n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(url)\n scrape_profile(urlhandle, outfile, year, month)\n # time.sleep(1+jitter)\n except Exception as e:\n print(\"Exception when handling {}\".format(url))\n print(e)\n \n print(\"{}-{} : complete.\".format(year,month))",
"def get_urls(*params: str, num_of_links: int = 1) -> list:\n urls = []\n try:\n for i in range(num_of_links):\n url = \"https://finviz.com/screener.ashx?v=111\"\n codes = ','.join(rts_codes[len(rts_codes)*(num_of_links - i - 1)//num_of_links:(len(rts_codes)*(num_of_links - i)//num_of_links)])\n payload = {\"FT\": 2,\"f\": params,\"t\": codes}\n req = requests.get(url, params=payload)\n if len(req.url) > 2900:\n urls = []\n num_of_links += 1\n urls = get_urls(*params, num_of_links=num_of_links)\n else:\n urls.append(req.url)\n return (urls)\n except Exception as e:\n print (e)\n return None",
"def main():\n category_list = []\n h = httplib2.Http('.cache')\n websites = [\"http://www.goodreads.com/genres/list?page=1\", \"http://www.goodreads.com/genres/list?page=2\",\n \"http://www.goodreads.com/genres/list?page=3\"]\n for website in websites:\n response, content = h.request(website)\n if response.status != 200:\n print(\"Status code \", response.status)\n return\n soup = BeautifulSoup(content, 'html.parser')\n data = soup.find_all(\"a\", class_=\"mediumText actionLinkLite\")\n for x in data:\n category_list.append(str(x.string))\n\n data = \"category_list = \" + str(category_list)\n\n with open(\"InitialDataExtraction/category_list.py\", mode='w', encoding=\"utf-8\") as a_file:\n a_file.write(data)\n print(len(category_list))",
"def query_and_fetch(query, top_n=12):\n global url_details, url_text\n print('Query: ' + query + '; Top N: ' + str(top_n))\n url_details = []\n url_text = []\n driver = None\n bad_request = False\n try:\n driver = Fetcher.get_selenium_driver()\n driver.get('https://api.duckduckgo.com/?q=' + query + '&kl=wt-wt')\n except:\n print('An error occurred while searching query: ' + query)\n Fetcher.close_selenium_driver(driver)\n Fetcher.search_driver = None\n bad_request = True\n finally:\n try:\n if not bad_request:\n results = driver.find_elements_by_class_name('result__a')\n result_size = len(results)\n print('Result Size: ' + str(result_size))\n while result_size > 0 and len(url_details) < top_n:\n urls = []\n for element in results:\n new_url = element.get_attribute('href')\n # TODO: Filter URLs if required\n print(new_url)\n urls.append(new_url)\n\n fetched_result = Fetcher.fetch_multiple(urls, top_n)\n\n for fetched_data in fetched_result:\n if not fetched_data[1] or len(fetched_data[1].strip()) == 0:\n continue\n details = dict()\n details['url'] = fetched_data[0]\n details['html'] = fetched_data[1]\n details['title'] = fetched_data[2]\n details['label'] = predict(fetched_data[3])\n url_details.append(details)\n url_text.append(fetched_data[3])\n if len(url_details) == top_n:\n break\n\n # Infinite Scroll\n if len(url_details) < top_n:\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n results = driver.find_elements_by_class_name('result__a')\n results = results[result_size:]\n result_size = len(results)\n print('Moved to Next Page. Result Size: ' + str(result_size))\n except:\n print('An error occurred while searching query: '+ query + ' and fetching results')\n #finally:\n # if driver is not None:\n # Fetcher.close_selenium_driver(driver)\n setattr(flask.current_app, 'url_text', url_text)\n print('Search Completed')\n return url_details",
"def test_get_result_top_files(self):\n pass",
"def readurl(request):\n url = request.match_info.get('url', \"Anonymous\")\n file_queue = request.app['file_queue']\n\n logger.info('file queue size: %s' % file_queue.qsize())\n logger.info('handling url: %s' % url)\n request.app['mk'].set_chain(markov.MarkovGenerator())\n try:\n file_queue.put_nowait(url)\n request.app['mk'].sources.append(url)\n success = True\n except:\n success = False\n logger.info('file queue size: %s' % file_queue.qsize())\n return web.json_response(dict(\n success=success,\n modelName='_'.join(url.split('/')[-2:])\n ))"
] | [
"0.6438779",
"0.6052566",
"0.600989",
"0.5963049",
"0.59457004",
"0.59386194",
"0.5916427",
"0.59098995",
"0.5860502",
"0.5847924",
"0.57740724",
"0.5756279",
"0.575132",
"0.5734446",
"0.5716643",
"0.56921947",
"0.5688688",
"0.56574255",
"0.56539947",
"0.5652997",
"0.5643443",
"0.5623207",
"0.5613691",
"0.5607355",
"0.5605981",
"0.55977786",
"0.5590268",
"0.5587088",
"0.55806625",
"0.5575671"
] | 0.7255304 | 0 |
Format new sequence so it matches the type of the original sequence. | def format_seq(seq, new_seq):
if type(seq) == str:
return "".join(new_seq)
elif type(seq) == tuple:
return tuple(new_seq)
else:
return new_seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def asformat(self, format):",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n # first the column type must be defined\n raise Exception('The data type of this column is not yet defined!')",
"def transformation_seq(self, sequence: str):\n\n # Add '$' after the sequence\n seq = sequence.upper() + \"$\"\n\n # Initialization of the square matrix of all the offsets of the sequence\n seq_matrix = [seq]\n\n previous_seq = seq\n\n # Filling of the square matrix\n for i in range(0, len(seq)-1, 1):\n next_seq = previous_seq[len(seq)-1] + previous_seq[0:len(seq)-1]\n # Complete list for print step by step\n self.list_step_trans_seq.append(next_seq)\n seq_matrix.append(next_seq)\n previous_seq = next_seq\n\n # Sorting the square matrix and display\n self.sort_and_print_matrix(seq_matrix, self.list_el_matrix_final_trans)\n\n # Recovering the last character of each line\n bwt = \"\"\n\n for line in seq_matrix:\n bwt += line[len(line)-1]\n\n self.save(bwt)\n\n return bwt",
"def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format",
"def change_biopython_record_sequence(record, new_seq):\n new_record = deepcopy(record)\n\n if has_dna_alphabet:\n seq = Seq(new_seq, alphabet=DNAAlphabet())\n else:\n seq = Seq(new_seq)\n\n new_record.seq = seq\n return new_record",
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_regular_to_regular(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def fix_seq(self, fixed_seq):\n self.wc.fix_seq(wc(fixed_seq))",
"def reformat(self, seq_name, *, prefix=\"s\"):\n\t\treturn \"%s_%012u\" % (prefix, self.get_sid(seq_name))",
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def encode(self, seq):",
"def align(self):\n number_of_Xs = 0\n xFront = \"\"\n xEnd = \"\"\n dashFront = \"\"\n dashEnd = \"\"\n\n # Determining if variable amino acids (\"X\") need to be added to the\n\t # beginning of the sequence:\n z = self.hmmStart-self.seqStart\n number_of_Xs = (self.hmmStart-1)-z\n if z > 0:\n dashFront = \"-\"*z\n xFront = \"X\"*number_of_Xs\n elif self.hmmStart-1<=self.seqStart-1:\n xFront = \"X\"*(self.hmmStart-1) \n\n # Determining if variable amino acids (\"X\") need to be added to the \n # end of the sequence:\n number_of_Xs_end = self.hmmLength - self.hmmEnd\n\n # The original sequence length; SPA format includes this\n delimeter = \"|\" #Need to fix can be \"_\" or \"|\" or something else...\n \n distToSeqEnd = self.origSeqLength - seqTo\n if distToSeqEnd >= number_of_Xs_end and number_of_Xs_end != self.hmmLength:\n xEnd = 'X'*number_of_Xs_end\n else:\n if distToSeqEnd < number_of_Xs_end:\n xEnd = 'X'*distToSeqEnd\n \tdashEnd += \"-\"*(number_of_Xs_end-distToSeqEnd)\n \t\n begin = \"{}{}\".format(dashFront, xFront)\n end = \"{}{}\".format(xEnd, dashEnd)\n self.addToFront(begin)\n self.data.extend(end)\n self.original = str(self)",
"def set_SEQUENCE(self, newSeq):\n\t\tself.SEQUENCE = newSeq\n\t\tself.LENGTH = len(newSeq)",
"def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x",
"def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x",
"def format(self):\n ...",
"def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf",
"def format_alignment(self, alignment):\n raise NotImplementedError(\"This method should be implemented\")\n ###################################################\n # You MUST implement this method in the subclass. #\n ###################################################",
"def fasta_format(self, line_width=None):\n return fasta_formatted_string(self.name, self._sequence,\n description=self.description,\n line_width=line_width)",
"def format(self, item):\n raise NotImplementedError()",
"def sequence_type(self) -> str:\n raise NotImplementedError()",
"def _get_nullformat(self, newformat):\n if self._type == int:\n length = len(str(newformat % 1))\n return '%'+str(length)+'s'\n elif self._type == float:\n length = len(str(newformat % 1.0))\n return '%'+str(length)+'s'\n else:\n return newformat",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name=\"even\")\n odd_dna = self.SequenceClass(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def exchange_first_last(seq):\n # Create new list and set it to the last element of the original sequence\n new_seq = [seq[-1]]\n\n # Add the middle elements from the original sequence\n new_seq.extend(seq[1:-1])\n\n # Add the first element from the original sequence\n new_seq.append(seq[0])\n\n # Run new sequence through formatting function\n return format_seq(seq, new_seq)",
"def test_to_phylip(self):\n s = self.SequenceClass(\"ACG\", name=\"xyz\")\n self.assertEqual(s.to_phylip(), \"xyz\" + \" \" * 27 + \"ACG\")",
"def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)",
"def format(self, data):",
"def create_timestructured(self, good, quantity):\n length = len(self._haves[good].time_structure)\n for i in range(length):\n qty = quantity[i] if type(quantity) == list else quantity / length\n self._haves[good].time_structure[i] += qty",
"def reformat():\n toolkit.reformat()"
] | [
"0.5782192",
"0.57681745",
"0.56224316",
"0.5617714",
"0.5553125",
"0.55416995",
"0.5499011",
"0.5464796",
"0.5445919",
"0.54286814",
"0.5403455",
"0.5346777",
"0.5314133",
"0.5298421",
"0.52948034",
"0.52948034",
"0.52077514",
"0.5198649",
"0.5171699",
"0.51702905",
"0.5166072",
"0.5154686",
"0.5140889",
"0.51311094",
"0.51305103",
"0.5109224",
"0.510711",
"0.5096912",
"0.5056265",
"0.50044894"
] | 0.73359036 | 0 |
Return sequence with every other item removed. | def remove_every_other(seq):
# Make a copy of the original sequence and step by 2
new_seq = seq[::2]
return new_seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_every_other_item(seq):\n seq_copy = seq [0::2]\n return seq_copy",
"def remove_every_other(seq):\n length = len(seq)\n new_seq = seq[0:length:2]\n return new_seq",
"def remove_four_and_every_other(seq):\n # Make a copy of the original sequence, but omit the first four and last four elements\n new_seq = seq[4:-4]\n\n # Make a copy of new sequence and step by 2\n new_seq = new_seq[::2]\n\n return new_seq",
"def remove_every_other(lst):\n return [ea for ea in lst if lst.index(ea) % 2 == 0 ]",
"def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy",
"def removeDup(item, seq):\n return [x for x in seq if x != item]",
"def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)",
"def get_pairs_to_delete(cycle):\n\n pairs = []\n for i, (_, right) in enumerate(cycle):\n left = cycle[(i - 1) % len(cycle)][0]\n successors = right.prefs[right.prefs.index(left) + 1 :]\n for successor in successors:\n pair = (right, successor)\n if pair not in pairs and pair[::-1] not in pairs:\n pairs.append((right, successor))\n\n return pairs",
"def rm(x, l):\n return [y for y in l if x != y]",
"def remove_elements(l, e):\n return [x for x in l if x != e]",
"def instrsreversed(self):\n x = self._lastInstr\n while x is not None:\n # now we can remove x and continue iterating :)\n x_prev = x.prev\n yield x\n x = x_prev",
"def every_other(seq):\n seq = seq[::2]\n return seq",
"def without(values, seq):\n for item in seq:\n if item not in values:\n yield item",
"def every_other(seq):\n every_other = seq[::2]\n return every_other",
"def removeDegenerate(self):\n return self[~self.testDegenerate()]",
"def drop(self):\n for step in self.steps:\n step[1].drop()",
"def every_other(seq):\n return seq[::2]",
"def trim_items(self, items):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\tif self.transactions:\r\n\t\t\tall_items = set.union(*[self.transactions[u][-1] for u in self.transactions.keys()])\r\n\t\telse:\r\n\t\t\treturn items\r\n\t\t\t\r\n\t\ttmp = items.copy()\r\n\t\t\r\n\t\tfor i in items:\r\n\t\t\tif i in all_items:\r\n\t\t\t\tlogger.debug(\"Removing %r\" % i)\r\n\t\t\t\ttmp.remove(i)\r\n\t\t\t\t\r\n\t\tlogger.debug(\"Exit\")\r\n\t\treturn tmp",
"def difference_update(self, other):\n if not isinstance(other, (list, np.ndarray, IndexCollection)):\n other = [other]\n for item in other:\n self.discard(item)\n return self",
"def pop(self):\r\n if len(self.s2)!=0:\r\n return self.s2.pop()\r\n while len(self.s1)!=0:\r\n self.s2.append(self.s1.pop())\r\n return self.s2.pop()",
"def difference(seq, *seqs):\n yield from differenceby(None, seq, *seqs)",
"def rem_odd(seq):\n\treturn seq[::2]\n\n\ta_string = \"this is a string\"\n\ta_tuple = (1,2,3,4,5,6)\n\n\tassert rem_odd(a_string) == 'ti sasrn'\n\tassert rem_odd(a_tuple) == (1,3,5)",
"def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)",
"def remove_repeated(l1, l2):\n for i in range(len(l1)-1):\n j=i+1\n while j<len(l1):\n if l1[j] == l1[i]:\n l1.pop(j)\n l2.pop(j)\n else:\n j+=1",
"def compact(seq):\n for item in seq:\n if item:\n yield item",
"def listDegenerate(self):\n return arange(self.nelems())[self.testDegenerate()]",
"def remove_direction(t):\n return [y for y, _ in t]",
"def drain(self, reverse=False):\n if not reverse:\n while self._first:\n item = self._items.pop(self._first)\n yield item[0]\n self._first = item[1][1]\n else:\n while self._last:\n item = self._items.pop(self._last)\n yield item[0]\n self._last = item[1][0]\n self.clear()",
"def remove_rear(self):\n\n traverse = self.front\n if self.rear == self.front:\n self.rear = None\n self.front = None\n return traverse.data\n\n while traverse.next != self.rear:\n traverse = traverse.next\n\n rear_value = self.rear\n self.rear = traverse\n traverse.next = None\n return rear_value.data",
"def remove_odds(self):\n cur = self.head\n\n while cur:\n if cur == self.head and cur.data % 2:\n self.head = cur.next\n cur = self.head\n if not cur:\n self.tail = None\n continue\n\n if cur and cur.data % 2:\n prev.next = cur.next\n cur = cur.next\n if not cur:\n self.tail = prev\n\n if cur and not cur.data % 2:\n prev = cur\n cur = cur.next"
] | [
"0.814443",
"0.7579058",
"0.65186775",
"0.64783514",
"0.6305853",
"0.61302996",
"0.6013019",
"0.59792227",
"0.5977844",
"0.5863139",
"0.58459187",
"0.58252424",
"0.5821521",
"0.565928",
"0.565343",
"0.5642883",
"0.5621129",
"0.55982196",
"0.55754906",
"0.55539966",
"0.5539476",
"0.5534452",
"0.5522315",
"0.5517268",
"0.54911095",
"0.5487317",
"0.54783213",
"0.54743683",
"0.5473671",
"0.5469544"
] | 0.7961374 | 1 |
Return sequence with the first four and last four items removed, plus every other item in the remaining sequence. | def remove_four_and_every_other(seq):
# Make a copy of the original sequence, but omit the first four and last four elements
new_seq = seq[4:-4]
# Make a copy of new sequence and step by 2
new_seq = new_seq[::2]
return new_seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy",
"def fours_removed(seq):\n length = len(seq) - 4\n new_seq = seq[4:length:2]\n return new_seq",
"def remove_every_other_item(seq):\n seq_copy = seq [0::2]\n return seq_copy",
"def remove_every_other(seq):\n length = len(seq)\n new_seq = seq[0:length:2]\n return new_seq",
"def remove_every_other(seq):\n # Make a copy of the original sequence and step by 2\n new_seq = seq[::2]\n\n return new_seq",
"def first_four_last_four(seq):\n seq = seq[4:-4:2]\n return seq",
"def drop(n, seq):\n return itertools.islice(seq, n, None)",
"def strip_tail(sequence, values):\n return list(reversed(list(strip_head(reversed(sequence), values))))",
"def rem_ends_odds(seq):\n\ttemp_seq=seq[4:-4]\n\treturn rem_odd(temp_seq)\n\n\ta_string = \"this is a string\"\n\ta_tuple = (1,2,3,4,5,6,7,8,9,10,11,12)\n\n\tassert rem_ends_odds(a_string) == \" sas\"\n\tassert rem_ends_odds(a_tuple) == (5,7)",
"def trim_sequences(seq_1, seq_2):\n start_index = 0\n end_index = 0\n\n for base_1, base_2 in zip(seq_1, seq_2):\n if base_1 == base_2:\n start_index += 1\n else:\n break\n seq_1 = seq_1[start_index:]\n seq_2 = seq_2[start_index:]\n\n for base_1, base_2 in zip(reversed(seq_1), reversed(seq_2)):\n if base_1 == base_2:\n end_index += 1\n else:\n break\n\n if end_index != 0:\n seq_1 = seq_1[:-end_index]\n seq_2 = seq_2[:-end_index]\n return seq_1, seq_2, start_index, end_index",
"def strip_head(sequence, values):\n values = set(values)\n return list(itertools.dropwhile(lambda x: x in values, sequence))",
"def first_last_chop(seq):\n return seq[4:-4:2]",
"def drop(lst, n): # noqa: N805\n for _ in range(n):\n try:\n lst = lst.tail\n except AttributeError:\n break\n return lst",
"def chop_up_to_4s(list, n):\n sublists = []\n num_sublists = 4**(n-1)\n for i in range(num_sublists):\n sublists.append(list[4*i: 4*i + 4])\n return sublists",
"def trim_axs(axs, N):\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]",
"def trim_axs(axs, N):\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]",
"def trim_axs(axs, N):\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]",
"def trim_axs(axs, n):\n axs = axs.flat\n for ax in axs[n:]:\n ax.remove()\n return axs[:n]",
"def remove_sequence(self):\n self.sequence_fragment_list = []",
"def rem_odd(seq):\n\treturn seq[::2]\n\n\ta_string = \"this is a string\"\n\ta_tuple = (1,2,3,4,5,6)\n\n\tassert rem_odd(a_string) == 'ti sasrn'\n\tassert rem_odd(a_tuple) == (1,3,5)",
"def remove_every_other(lst):\n return [ea for ea in lst if lst.index(ea) % 2 == 0 ]",
"def drop(iterable, n):\n counter = 0\n for element in iterable:\n if counter < n:\n counter += 1\n else:\n yield element",
"def drop(iterable, n, islice=islice):\n return islice(iterable, n, None)",
"def elements_reversed(seq):\n new_seq = seq[::-1]\n return new_seq",
"def fast_forward_to_length(sequences, length):\n return itertools.dropwhile(lambda seq: len(seq) != length, sequences)",
"def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset",
"def sequence_del(my_str):\r\n new = ''\r\n l = len(my_str)\r\n for i in range(l -1):\r\n # for j in range(1,len(my_str)):\r\n if my_str[i] == my_str[i+1]:\r\n continue\r\n new += my_str[i]\r\n new += my_str[i]\r\n print(new)",
"def rotate(l: list, n: int) -> list:\n return l[-n:] + l[:-n]",
"def reverse_elements(seq):\n\n new_seq = []\n\n i = -1\n\n while i >= -len(seq):\n new_seq.append(seq[i])\n i -= 1\n\n return format_seq(seq, new_seq)",
"def rearrange_thirds(seq):\n length = int(len(seq) / 3)\n new_seq = seq[-length:] + seq[:length] + seq[length:-length]\n return new_seq"
] | [
"0.7949985",
"0.72331405",
"0.6958973",
"0.66397864",
"0.6586896",
"0.65185237",
"0.61443645",
"0.5820111",
"0.5701834",
"0.5620001",
"0.5594261",
"0.55820346",
"0.5514332",
"0.5510886",
"0.54914135",
"0.54914135",
"0.54914135",
"0.545188",
"0.5427004",
"0.5376906",
"0.5332845",
"0.53194565",
"0.5313642",
"0.52625525",
"0.5245914",
"0.52226174",
"0.5217813",
"0.5200636",
"0.5197703",
"0.5187304"
] | 0.8401329 | 0 |
Return a sequence with the last third, then first third, then middle third in the new order. | def last_first_middle_third(seq):
# Using the length of the sequence, figure out roughly what one third should be
one_third = len(seq) // 3
new_seq = list(seq[-one_third:])
new_seq.extend(seq[:-one_third])
return format_seq(seq, new_seq) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def third_reorder(seq):\n third = len(seq)//3\n return seq[third:-third]+seq[-third:]+seq[:third]",
"def replace_thirds(seq):\n third = int(len(seq)/3)\n middle_third = seq[third:-third]\n last_third = seq[-third:]\n first_third = seq[0:third]\n seq_copy = middle_third + last_third + first_third\n return seq_copy",
"def replace_thirds(seq):\n third = int(len(seq)/3)\n middle_third = seq[third:-third]\n last_third = seq[-third:]\n first_third = seq[0:third]\n seq_copy = middle_third + last_third + first_third\n return seq_copy",
"def third_mixup(seq):\n len_third = int(len(seq) / 3)\n third_mixup = seq[-len_third:] + seq[:-len_third]\n return third_mixup",
"def rearrange_thirds(seq):\n length = int(len(seq) / 3)\n new_seq = seq[-length:] + seq[:length] + seq[length:-length]\n return new_seq",
"def thirds(seq):\n third = math.floor(len(seq)/3)\n seq = seq[third:third*2] + seq[third*2:] + seq[0:third]\n return seq",
"def rotate_left3(nums):\n rotated_list = nums[1:len(nums)]\n rotated_list.append(nums[0])\n return rotated_list",
"def insert_sequence(x,y,z):\n return x[:z] + y + x[z:]",
"def rotate3(self, nums, k) -> None:\n k = k % len(nums)\n count = 0\n for i in range(len(nums)):\n if count >= len(nums):\n break\n current = i\n previous = nums[i]\n while True:\n next = (current + k) % len(nums)\n temp = nums[next]\n nums[next] = previous\n previous = temp\n current = next\n count += 1\n if(i == current):\n break",
"def sequence(side_length):\r\n index = side_length\r\n numbers = []\r\n tmp1 = (index -1 ) / 2\r\n #numbers.append([index, 3, 5, 7, 9])\r\n for i in range(tmp1):\r\n if i == 0:\r\n numbers.append([3, 3, 5, 7, 9])\r\n else:\r\n diff = (3+i*2) - 1\r\n tmp2 = numbers[i-1][4] + diff\r\n numbers.append([3+i*2, tmp2, tmp2+diff, tmp2+diff*2, tmp2+diff*3])\r\n return numbers",
"def exchange_first_last(seq):\n first = seq[0:1]\n middle = seq[1:-1]\n last = seq[-1:]\n seq_copy = last + middle + first\n return seq_copy",
"def get_median_of_three(sequence, lo, hi, key=key_fun):\n mid = (hi + lo) // 2\n\n lo_val = key(sequence[lo])\n mid_val = key(sequence[mid])\n high_val = key(sequence[hi])\n\n if lo_val < mid_val:\n if mid_val < high_val:\n pivot = mid\n elif lo_val > high_val:\n pivot = lo\n else:\n pivot = hi\n else:\n if mid_val > high_val:\n pivot = mid\n elif lo_val < high_val:\n pivot = lo\n else:\n pivot = hi\n\n return pivot",
"def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]",
"def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]",
"def rotate3(nums, k):\n n = len(nums)\n\n if k == 0:\n return nums\n if n < 2:\n return nums\n\n k = k % n # In case k > len(nums), prevent redundant rotations\n\n for i in range(k):\n saved = nums[n - k + i] # The extra O(1) space\n for j in range(n - k + i, i, -1):\n # Since we're rotating elements to the right, we should traverse the\n # index from right-to-left to avoid overwriting previously traversed\n # elements\n nums[j] = nums[j-1]\n nums[i] = saved\n\n return nums",
"def mid_last_first(seq):\n\tif len(seq)%3==2:\n\t\tF_L=len(seq)//3+1\n\telse:\n\t\tF_L=len(seq)//3\n\treturn seq[F_L:]+seq[:F_L]\n\n\tassert mid_last_first(\"testing123\") =='ting123tes'\n\tassert mid_last_first(\"testing1234\") == 'ing1234test'\n\tassert mid_last_first(\"testing12345\") == 'ing12345test'\n\tassert mid_last_first((1,2,3,4,5))== (3,4,5,1,2)",
"def reorder_proper_torsions(i0, i1, i2, i3):\n if i0 < i3:\n j0, j1, j2, j3 = i0, i1, i2, i3\n else:\n j0, j1, j2, j3 = i3, i2, i1, i0\n\n return j0, j1, j2, j3",
"def exchange_first_last(seq):\n return seq[-1:]+seq[1:-1]+seq[0:1]",
"def first_four_last_four(seq):\n seq = seq[4:-4:2]\n return seq",
"def one_to_three(chain_refined):\n three_res_list = []\n\n for res in chain_refined:\n three = utilities.one_to_three[res]\n three_res_list.append(three)\n return three_res_list",
"def Quick3(items, lo, hi):\r\n if hi <= lo:\r\n return\r\n lt = lo\r\n gt = hi\r\n pivot = items[(hi+lo)//2]\r\n i = lo\r\n while i <= gt:\r\n if items[i] < pivot:\r\n items[lt], items[i] = items[i], items[lt]\r\n lt += 1\r\n i += 1\r\n elif items[i] > pivot:\r\n items[gt], items[i] = items[i], items[gt]\r\n gt -= 1\r\n else:\r\n i += 1\r\n print(items)\r\n Quick3(items, lo, lt - 1)\r\n Quick3(items, gt + 1, hi)",
"def mixByThirds(n):\n eachDiv = int(len(n) / 3)\n if len(n) % 3 == 0:\n return(n[(2 * eachDiv): (3 * eachDiv)] +\n n[0: eachDiv] + n[eachDiv: (2 * eachDiv)])\n elif len(n) % 3 == 2:\n return(n[(2 * eachDiv + 1): ((3 * eachDiv) + 2)] +\n n[0: eachDiv] + n[eachDiv: (2 * eachDiv + 1)])\n else:\n return(n[(2 * eachDiv): (3 * eachDiv + 1)] +\n n[0: eachDiv] + n[eachDiv: (2 * eachDiv)])",
"def middle(x):\n del x[0] \n l = len(x)\n del x[l-1]\n return x",
"def front3(str):\r\n if len(str)<4:\r\n return 3*str\r\n else:\r\n return 3*str[:3]",
"def previous_and_next(all_items: Iterable) -> Iterable:\n previous_items, items, next_items = tee(all_items, 3)\n previous_items = chain([None], previous_items)\n next_items = chain(islice(next_items, 1, None), [None])\n return zip(previous_items, items, next_items)",
"def one_to_three(one):\n return ((1,0,0),(0,1,0),(0,0,1))[one+1]",
"def middle(some_list):\n #This function will return the new list from input list but not have the first element and the last element.\n new_list = [] # Define new_list to cantain element in original list.\n for e in some_list: \n new_list.append(e)\n new_list.pop(0)\n #print(some_list)\n new_list.pop(-1)\n return new_list",
"def exchange_first_last(seq):\n # Create new list and set it to the last element of the original sequence\n new_seq = [seq[-1]]\n\n # Add the middle elements from the original sequence\n new_seq.extend(seq[1:-1])\n\n # Add the first element from the original sequence\n new_seq.append(seq[0])\n\n # Run new sequence through formatting function\n return format_seq(seq, new_seq)",
"def exchange_first_last(seq):\n seq = seq[-1:] + seq[1:-1] + seq[:1]\n return seq",
"def splice(l, a, b, c):\n\n return l[:a] + [c] + l[a + b:], l[a:a + b]"
] | [
"0.84744817",
"0.7467445",
"0.7467445",
"0.7054174",
"0.7008001",
"0.6548605",
"0.6217295",
"0.6016097",
"0.58688897",
"0.5803951",
"0.57408905",
"0.57041776",
"0.565916",
"0.565916",
"0.56146777",
"0.5578442",
"0.55430853",
"0.55274",
"0.549154",
"0.5474041",
"0.5442637",
"0.5424496",
"0.53935456",
"0.5386467",
"0.5367253",
"0.53404737",
"0.531373",
"0.5303459",
"0.5285764",
"0.52649635"
] | 0.7918163 | 1 |
Given the zenith PWV (reported by APEX) and altitude of source, returns the real amount of water between the telescope and space. Basically returns pwv/cos(zenith_angle) | def get_real_pwv(pwv, altitude):
zenith_angle = 90-altitude
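    # Airmass via the plane-parallel approximation: sec(zenith angle) = 1/cos(zenith angle)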
airmass = 1/np.cos(zenith_angle*np.pi/180)
return pwv*airmass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def VaporPressure(dwpt):\n\n return 611.2*exp(17.67*dwpt/(243.5+dwpt))",
"def pressure(altitude):\n t = temperature(altitude) # R\n if altitude <= 36152:\n p = 2116*(t/518.6)**5.256 # psf\n else:\n p = 473.1*exp(1.73-0.000048*altitude) # psf\n return p",
"def water_vapour(t):\n T_0 = 273.15\n T_rw = 35.86 # over water\n a = 17.269\n # cdo -mulc,610.78 -exp -div -mulc,17.5 -subc,273.15 a\n return 610.78 * np.exp(a * (t - T_0) / (t - T_rw))",
"def altitude(self):\r\n pressure = self.pressure # in Si units for hPascal\r\n return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))",
"def altitude(p):\r\n \r\n R = 290 #specific gas constant \r\n T = 93.65 #surface temperature K from A.Coustenis book\r\n g = 1.354 #surface gravity from A.Coustenis book\r\n p0 = 1467 #surface pressure in hPa 6.1 for mars\r\n \r\n z = np.empty_like(p)\r\n \r\n for i in range(p.shape[0]):\r\n z[i] = (-1)*(R*T/g)*np.log((p[i])/p0)/(10**3)\r\n \r\n # Make into an xarray DataArray\r\n z_xr = xr.DataArray(z, coords=[z], dims=['pfull'])\r\n z_xr.attrs['units'] = 'km'\r\n \r\n #below is the inverse of the calculation\r\n #p[i] = p0*np.exp((-1)*z[i]*(10**3)/((R*T/g)))\r\n \r\n return z_xr",
"def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh",
"def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)",
"def vaporPressure(temp: float) -> float:\n exponent = (17.27*temp)/(temp + 237.3)\n vp = 611*np.exp(exponent)\n\n return vp",
"def Wp(self):\n Wp = trapz_loglog(self._Ep * self._J, self._Ep) * u.GeV\n return Wp.to('erg')",
"def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho",
"def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta",
"def zeta(self, Ppump):\n return(self.alpha(Ppump) / 2. / self.w0(Ppump))",
"def wind_chill(T_a, v):\r\n return 13.12 + 0.6215*(T_a) - 11.37*(v)**0.16 + 0.3965*(T_a)*(v)**0.16",
"def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)",
"def wind_potential_calculation(p_ngz, p_hyst, g_v, v_v, w_speed, w_theta, p, heading):\n [max_vel, up_beat, dn_beat, w_theta] = spdf.speed_polar_diagram_calculation(w_speed, w_theta)\n no_go = np.array([np.deg2rad(up_beat), np.deg2rad(dn_beat)])\n w_theta = np.deg2rad(w_theta)\n point_angle = np.arctan2(p[1], p[0])\n heading_angle = np.arctan2(heading[1], heading[0])\n rel_heading_angle = heading_angle - w_theta\n rel_point_angle = point_angle - w_theta\n\n while rel_point_angle < 0:\n rel_point_angle = rel_point_angle + 2 * np.pi\n while rel_point_angle > 2 * np.pi:\n rel_point_angle = rel_point_angle - 2 * np.pi\n\n while rel_heading_angle < 0:\n rel_heading_angle = rel_heading_angle + 2 * np.pi\n if rel_heading_angle > 2 * np.pi:\n rel_heading_angle = rel_heading_angle - 2 * np.pi\n\n if (no_go[1] <= rel_point_angle <= no_go[1] + 2*(np.pi - no_go[1])) or (\n no_go[0] >= abs(rel_point_angle) >= 0) \\\n or (abs(rel_point_angle) >= (2 * np.pi - no_go[0])):\n return \"case1\"\n# return p_ngz\n if (rel_heading_angle < no_go[1] < rel_point_angle) or (\n rel_heading_angle > no_go[1] > rel_point_angle):\n return \"case2\"\n# return p_hyst + g_v * ((v_v - max_vel) / max_vel)\n else:\n return \"case3\"",
"def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh",
"def vapour_pressure(self):\n return self.relative_humidity * self.solvent.equilibrium_vapour_pressure(self.temperature)",
"def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci",
"def get_stream_function_vortex(strength, xv, yv, X, Y):\r\n psi = strength / (4 * math.pi) * numpy.log((X - xv)**2 + (Y - yv)**2)\r\n \r\n return psi",
"def altitude_to_pressure(alt):\n for i in range(len(_heights)-1,0,-1):\n h0 = _heights[i]\n T0 = _basetemps[i]\n if alt > h0:\n if _isotherm[i]:\n rP = math.exp(-_g / _R / T0 * (alt - h0))\n else:\n l0 = _lapsert[i]\n rP = math.pow(1 + (alt - h0) * l0 / T0, -_g / _R / l0)\n return _basepress[i] * rP\n l0 = _lapsert[0]\n return _stdpres * math.pow(1 + alt * l0 / _stdtemp, -_g / _R / l0)",
"def get_van_Der_Waals_radius(self):\n return self.van_Der_Waals_radius",
"def wind_adjust_func(uz_array, zw):\n return uz_array * 4.87 / np.log(67.8 * zw - 5.42)",
"def exner_function(pressure, reference_pressure=P0):\n return (pressure / reference_pressure)**kappa",
"def calculate_water_vapour_pressure(self, T=None, units='atm'): # temp in Kelvin\n A,B,C = self.get_ABC(T=T)\n \n if A is not None and B is not None and C is not None:\n # bar \n p_vap_bar = math.pow(10, (A-B/(C+T)))\n if units=='bar':\n return p_vap_bar\n \n # atm\n elif units=='atm': \n p_vap_atm = convertor.convert(\n p_vap_bar, \n currentUnits='bar', \n newUnits='atm')\n return p_vap_atm\n \n else:\n return None\n else:\n return None",
"def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06",
"def hw_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n flow_dir = np.sign(i[0])\n\n return ((i[1] - o[1]) * flow_dir -\n (10.67 * abs(i[0]) ** 1.852 * self.L.val /\n (self.ks.val ** 1.852 * self.D.val ** 4.871)) *\n (9.81 * ((v_i + v_o) / 2) ** 0.852))",
"def hdw(sounding, elevation=None):\n \n bottom = sounding.profile.elevation\n if elevation is not None and elevation > bottom:\n bottom = elevation\n top = bottom + 500.0\n \n # Find the station pressure for the surface adjusted temperature and dew point.\n bottom_p = sounding.surface.pres\n i = 0\n while bottom_p is None or sounding.profile.hgt[i] < bottom:\n bottom_p = sounding.profile.pressure[i]\n i += 1\n \n vals = zip(\n sounding.profile.hgt, sounding.profile.temp, sounding.profile.dewpoint,\n sounding.profile.windSpd, sounding.profile.pressure\n )\n\n vals = filter(lambda x_: x_[0] >= bottom, vals)\n vals = tuple(takewhile(lambda x: x[0] <= top, vals))\n \n # Filter out None values\n vpds = (\n (x[1], x[2], x[4])\n for x in vals\n if x[1] is not None and x[2] is not None and x[4] is not None\n )\n # Convert to potential temperature and specific humidity for reducing to the surface.\n vpds = ((wxf.theta_kelvin(x[2], x[0]), wxf.specific_humidity(x[1], x[2])) for x in vpds)\n # Finish surface adjustment.\n vpds = (\n (\n wxf.temperature_c_from_theta(x[0], bottom_p),\n wxf.dew_point_from_p_and_specific_humidity(bottom_p, x[1])\n ) for x in vpds\n )\n \n vpds = ((wxf.vapor_pressure_liquid_water(x[0]) - \\\n wxf.vapor_pressure_liquid_water(x[1])) for x in vpds)\n max_vpd = max(vpds)\n \n max_wspd = max(x[3] for x in vals if x[3] is not None)\n max_wspd = wxf.knots_to_mps(max_wspd)\n \n return max_vpd * max_wspd",
"def raw_zener_voltage(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.zener_voltage",
"def getEnthalpyOfVaporization(self,Temperature):\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\n\t\t# Eqn 7 from Epstein et al 2009\n\t\tHvap = 2.303*8.3145*Temperature*Temperature*B/((C + Temperature - 273.15)*(C + Temperature - 273.15))\n\t\treturn Hvap # units are J/molK",
"def getHt(self) -> float:\n\n return self.p3ddict.p3dz()"
] | [
"0.6731954",
"0.6352121",
"0.6210927",
"0.6185982",
"0.6112099",
"0.6091341",
"0.60461605",
"0.60024655",
"0.5860308",
"0.5859313",
"0.5838366",
"0.5835615",
"0.58182067",
"0.57965446",
"0.575435",
"0.5737927",
"0.5728905",
"0.5710867",
"0.57060504",
"0.57017237",
"0.5690225",
"0.56718177",
"0.5668589",
"0.5664495",
"0.56443787",
"0.5644136",
"0.56270325",
"0.5602641",
"0.5592489",
"0.5588534"
] | 0.7299661 | 0 |
Insert a Follower into the database | def fillFollowerInDB(self):
sqlInsertFollowers = "INSERT INTO follower screen_name VALUES %s"
mycursor.execute(sqlInsertFollowers,self.screen_name)
mydb.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def followUser(following):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO followers (user, following) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, following))\n finish(con)",
"def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n self.users[followerId].add(followeeId)",
"def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId: return\n self.users[followerId].add(followeeId)",
"def follow(self, followerId: 'int', followeeId: 'int') -> 'None':\n self.followees[followerId].add(followeeId)",
"def follow(whomUserName,whoUserName):\n\n whomuser = query_db('select * from user where username = ?',\n [whomUserName], one=True)\n whouser = query_db('select * from user where username = ?',\n [whoUserName], one=True)\n\n\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [whouser['user_id'], whomuser['user_id']],one=True) is not None\n\n if whouser is None:\n return jsonify({'message':'User trying to follow another user which does not exist'}),404\n\n if whomuser is None:\n return jsonify({'message':'User getting followed does not exist yet'}),404\n\n if not followed:\n db = get_db()\n\n db.execute('''insert into follower (\n who_id, whom_id) values (?, ?)''',\n [whouser['user_id'], whomuser['user_id']])\n db.commit()\n flash('Operation successful')\n return jsonify({'message': 'Successfully following'}), 201\n else:\n return jsonify({'message':'Specified user is already following another user'}),403",
"def follow(self, followerId: int, followeeId: int) -> None:\n self.followees[followerId].add(followeeId)",
"def add_to_following(sender, instance, created, **kwargs):\r\n sender_= instance.sender\r\n receiver_ = instance.receiver\r\n if instance.status == 'accepted':\r\n sender_.following.add(receiver_.user)",
"def follow(self, follower, followee):\n pass",
"def follow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('insert into follower (who_id, whom_id) values (?, ?)',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are now following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))",
"def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId:\n return\n if followerId not in self.users.keys():\n self.users[followerId] = user()\n if followeeId not in self.users.keys():\n self.users[followeeId] = user()\n self.users[followerId].followees[followeeId] = self.users[followeeId]",
"def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}",
"def force_follow_department(db):\n\n db.execute('''insert or ignore into follower(who_id, whom_id) select user_id, department_id from\n user, department where user.city = department.city''')\n db.commit()",
"def follow(self, followerId, followeeId):\n\n # 把 followeeId append到他的 follow 属性中\n if followerId == followeeId: # 不能自己关注自己\n return\n # 实例化一个user(followerID)\n follower = UserInfo()\n follower.user_id = followerId \n follower.follows.append(followeeId) \n self.user_pool[followerId] = follower",
"def create(self, validated_data):\n\n following = models.FollowingsModel(\n followed = validated_data['followed']\n )\n request = self.context.get('request', None)\n following.follower = request.user\n existings = models.FollowingsModel.objects.filter(followed=following.followed, follower=following.follower)\n if len(existings) == 0:\n following.save()\n return following\n elif following.follower == following.followed:\n raise serializers.ValidationError({'message':'You Cannot follow yourself'})\n\n raise serializers.ValidationError({'message':'You have already followed this user.'})",
"def follow(self, followerId: int, followeeId: int) -> None:\n self.follow_map[followerId].add(followeeId)",
"def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)",
"def follow(current_user,user_id):\n if request.method == \"POST\":\n #follee = request.get_json('user_id')\n if User.query.filter_by(userid= user_id):\n follow = Follows(userid =user_id, follower_id =current_user.userid)\n db.session.add(follow)\n db.session.commit()\n return jsonify({'message' :'You are now following'})\n return jsonify({'message' :'User doesnt exist..Try again'})\n return jsonify({'errors' : 'Method Invalid'})",
"def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)",
"def addOne():\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n insert(Followup).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Add the given client'}\n return {'status': \"Adding Succesful\"}",
"def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])",
"def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)",
"def post(self, request, pk):\n try:\n follower = request.user\n question = Question.objects.filter(pk=pk).first()\n\n \"\"\"Return HTTP 404 if the question does not exist\"\"\"\n if question is None:\n return JsonResponse({\"error\": \"Question you requested to follow does not exist\"}, status=status.HTTP_404_NOT_FOUND)\n\n \"\"\"Check if the following record already exists, if not create it, but if it does, fail silently\"\"\"\n if not QuestionFollowing.objects.filter(user=follower, question=question).exists():\n QuestionFollowing.objects.create(user=follower, question=question)\n \"\"\"Increment the question's following\"\"\"\n question.followings += 1\n question.save()\n\n return JsonResponse({'status': True}, status=status.HTTP_200_OK)\n except Exception as e:\n print(e)\n # return JsonResponse({'status': False, 'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))",
"def test_following_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def post(self):\n args = player_parser.parse_args()\n print(args)\n unique_player = DBPlayer.query.filter_by(nickname=args['nickname']).first()\n if unique_player:\n return get_response(409, 'player already existed!')\n try:\n new_player = DBPlayer(**args)\n db.session.add(new_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(201, 'done!')",
"def insert_player(document):\n players_col.insert_one(document)",
"def follow_user(self, target):\n try:\n if self.api.me().friends_count > 1990:\n return\n except Exception, e:\n print e\n\n \"Rate limit exceeded. Clients may not make more than 350 requests per hour.\"\n if \"Clients\" in str(e):\n continue\n # import pdb; pdb.set_trace()\n return\n\n try:\n self.api.create_friendship(target.hunted.screen_name)\n self.log.debug(\"Followed: %s\" % target.hunted.screen_name)\n except Exception, e:\n self.log.exception(\"Could not follow %s\" %\n target.hunted.screen_name)\n else:\n # Write record of new follow to db\n target.status = Target.PURGATORY\n target.save()",
"def add_follow(follow_id):\n\n want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")",
"def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))",
"def add_follow(follow_id):\n followed_user = User.query.get_or_404(follow_id)\n if not g.user or g.user.id == follow_id or followed_user.is_blocking(g.user):\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n g.user.following.append(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")"
] | [
"0.78603786",
"0.6736899",
"0.6574572",
"0.6521408",
"0.6442153",
"0.6402648",
"0.6336011",
"0.6331151",
"0.6322974",
"0.6309035",
"0.63037306",
"0.6298957",
"0.62458616",
"0.62110263",
"0.6175521",
"0.61738724",
"0.6148766",
"0.6145518",
"0.61096025",
"0.6083749",
"0.6081882",
"0.6070756",
"0.60669917",
"0.5991552",
"0.59779054",
"0.59571",
"0.5952108",
"0.5948747",
"0.59482485",
"0.59430265"
] | 0.76846194 | 1 |
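Most of the negatives in the block above implement the same follow bookkeeping: a per-user set of followee ids, a self-follow guard, and an is_following check. A minimal in-memory consolidation of that pattern (illustrative only, not taken from any single entry):

from collections import defaultdict

class FollowRegistry:
    """Minimal in-memory follow map, consolidating the set-based variants above."""

    def __init__(self):
        self.follows = defaultdict(set)  # follower_id -> set of followee ids

    def follow(self, follower_id, followee_id):
        if follower_id != followee_id:  # self-follow guard, as in several snippets
            self.follows[follower_id].add(followee_id)

    def unfollow(self, follower_id, followee_id):
        self.follows[follower_id].discard(followee_id)

    def is_following(self, follower_id, followee_id):
        return followee_id in self.follows[follower_id]

if __name__ == "__main__":
    reg = FollowRegistry()
    reg.follow(1, 2)
    reg.follow(1, 1)  # ignored by the guard
    assert reg.is_following(1, 2)
    assert not reg.is_following(1, 1)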
Calculate the total loss on a single tower running the CIFAR model. | def tower_loss(scope):
# Get images and flows for Flownet.
img1, img2, flo = flownet_input.inputs(False, FLAGS.data_dir, FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = flowNet.inference(img1, img2, FLAGS.batch_size)
# Add to the Graph the Ops for loss calculation.
_ = flowNet.loss(logits, flo)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % flowNet.TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss",
"def compute_loss(self):",
"def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss",
"def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss",
"def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss",
"def reduce_loss(self, all_loss):\n if self._gpu_num == 1:\n total_loss = all_loss[0]\n else:\n layer_loss = [all_loss[j] for j in range(self._gpu_num)]\n total_loss = tf.reduce_mean(layer_loss)\n\n return total_loss",
"def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]",
"def loss(self):\n if not self.run:\n self._run()\n return self.model_loss",
"def get_contractive_loss(self):\n keys = list(self.head.state_dict().keys())\n W = Variable(self.head.state_dict()[keys[-2]])\n if torch.cuda.is_available():\n W = W.cuda()\n contractive_loss = torch.sum(W**2, dim=1).sum()\n return contractive_loss",
"def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total",
"def get_loss(self):\n return self.loss / self.cnt",
"def get_loss(\n self,\n inputs,\n outputs,\n annotations,\n cand_net,\n add_controller_regularization=True,\n add_evaluator_regularization=True,\n ):\n return sum(self._criterion(inputs, outputs, annotations, cand_net).values())",
"def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss",
"def calculate_total_loss(self, train_x, train_y):\n return np.sum([self.calculate_error(x, y)\n for x, y in zip(train_x, train_y)])",
"def loss(self):\n return self._loss",
"def calc_loss(self, codes, encodings):\n return tf.reduce_mean((tf.stop_gradient(encodings) - codes) ** 2)",
"def tower_loss(scope, data_provider):\n\n images, _, labels = load_batch(data_provider, batch_size=BATCH_SIZE, height=image_size, width=image_size,\n is_training=True)\n\n # Create the model, use the default arg scope to configure the batch norm parameters.\n with slim.arg_scope(inception.inception_v1_arg_scope()):\n logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)\n\n one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes, scope=scope)\n\n slim.losses.softmax_cross_entropy(logits, one_hot_labels, scope=scope)\n\n total_loss = get_total_loss(scope)\n\n return total_loss",
"def calc_loss(prediction, target, bce_weight=0.5):\n # prediction = F.softmax(prediction)\n prediction = F.sigmoid(prediction)\n bce = F.binary_cross_entropy(prediction, target)\n \n dice = dice_loss(prediction, target)\n\n loss = bce * bce_weight + dice * (1 - bce_weight)\n\n return loss",
"def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)",
"def loss_op(self):\n return self.loss",
"def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def calc_loss(self, codes, encodings):\n return tf.reduce_mean((encodings - tf.stop_gradient(codes)) ** 2)",
"def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss",
"def _calc_loss(self, p_act_output:torch.Tensor, p_pred_output:torch.Tensor) -> float:\r\n\r\n return self._loss_fct(p_act_output, p_pred_output)",
"def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)",
"def calculate_loss(self, output, target, redmode = 'mean'):\n\n loss = F.cross_entropy(output, target, reduction = redmode)\n return loss",
"def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss",
"def compute_loss(self, depth_map, tar_image, direction, weighting=False):\n depth_map = depth_map.cuda(self.device)\n tar_image = tar_image.cuda(self.device)\n\n syn_image = self._reconstruct(depth_map=depth_map, direction=direction)\n if weighting==False:\n loss = self._recon_loss(tar=tar_image, syn=syn_image)\n else:\n loss = self._recon_loss_weighted(tar=tar_image, syn=syn_image)\n \n #back to main gpu\n loss = loss.cuda(0)\n return loss",
"def calculate_loss(self, output, batch, training_context, last_activation=None):\n if self._model_loss_key is None:\n return output\n else:\n return output[self._model_loss_key]"
] | [
"0.6700091",
"0.6494981",
"0.64351314",
"0.6419666",
"0.6419666",
"0.635259",
"0.634228",
"0.6337857",
"0.6266046",
"0.61995566",
"0.61981475",
"0.61751956",
"0.61435133",
"0.6103842",
"0.6090877",
"0.60856736",
"0.6064605",
"0.60511935",
"0.60422295",
"0.6033311",
"0.6024074",
"0.60003287",
"0.5990434",
"0.59748894",
"0.59718084",
"0.5946626",
"0.5940007",
"0.5912155",
"0.5902008",
"0.5901334"
] | 0.67803407 | 0 |
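The tower_loss entry above gathers the per-tower 'losses' collection, sums it with tf.add_n, and strips the 'tower_N/' prefix from op names before writing summaries. A minimal framework-free sketch of those two steps, assuming TOWER_NAME is 'tower' (flowNet.TOWER_NAME is not shown in the row):

import re

TOWER_NAME = 'tower'  # assumed prefix; flowNet.TOWER_NAME is not shown above

def total_tower_loss(losses):
    # Plain-Python analogue of tf.add_n over the per-tower 'losses' collection.
    return sum(losses)

def summary_name(op_name):
    # Strip the 'tower_N/' prefix before logging, exactly as the snippet does.
    return re.sub('%s_[0-9]*/' % TOWER_NAME, '', op_name)

if __name__ == "__main__":
    losses = {'tower_0/cross_entropy': 0.42, 'tower_0/weight_decay': 0.03}
    print(summary_name('tower_0/cross_entropy'))   # -> cross_entropy
    print(total_tower_loss(losses.values()))       # -> 0.45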
Random Subdomain attack packet builder | def randomSubBuilder(dom: string, src_ip: string, dst_ip: string, src_port: int, t: float, seed: float):
id_IP = int(RandShort()) #id for IP layer
id_DNS = int(RandShort()) #id for DNS layer
sub = randomSub(seed) #Random subdomain
q_name = sub + '.' + dom #Complete domain request
ans = Ether(src= '18:66:da:e6:36:56', dst= '18:66:da:4d:c0:08')/IP(src = src_ip, dst = dst_ip, id = id_IP)/UDP(sport = src_port)/DNS(rd = 0, id= id_DNS, qd=DNSQR(qname=str(q_name)))
ans.time = t #Set time
return ans | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_domainname():\n domainname = ''.join(generate_string(10, valid_domain_name_chars))\n domain = random.choice(['com', 'co.il', 'info'])\n return domainname+'.'+domain",
"def generateBaseDRQ(self, domain):\n if not DB.isValidTarget(domain):\n Error.printErrorAndExit(domain + \" is not a valid target\")\n pattern_length = len(DB.PATTERNS[domain])\n block = [set()]\n num_of_available_patterns = DB.getNumberOfHostsWithPatternLength(pattern_length) - 1\n if num_of_available_patterns >= Config.RQSIZE:\n hosts = set([domain])\n hosts.update(set(DB.getRandomHostsByPatternLengthB(pattern_length, Config.RQSIZE-1, hosts)))\n pattern_copy = {}\n for host in hosts:\n pattern_copy[host] = DB.getPatternForHost(host)\n pattern_copy[host].remove(host) \n block[0].add(host)\n for i in range(1, pattern_length, 1):\n block.append(set())\n for host in pattern_copy:\n block[i].add(pattern_copy[host].pop())\n else: \n num_of_needed_patterns = Config.RQSIZE - (num_of_available_patterns+1)\n padding = []\n for i in range(num_of_needed_patterns):\n # Find patterns whose lengths sum to pattern_length (if any exist that have not been chosen yet)\n pad1_len = pad2_len = -1\n for pad1_len, pad2_len in zip(range(1, pattern_length/2+1, 1), range(pattern_length-1, pattern_length/2-1, -1)):\n # This is a construct that generates numbers that sum to pattern_length. It is used instead of truly random\n # numbers because it will not get stuck when no more patterns are available.\n if ((DB.getNumberOfHostsWithPatternLengthB(pad1_len, block[0]) > 0) and \\\n (DB.getNumberOfHostsWithPatternLength(pad2_len) > 0)):\n break\n elif pad1_len == pattern_length/2: # No patterns of the correct length have been found, abort\n pad1_len = -1\n if (pad1_len == -1): # Break out of loop as no further patterns can be found.\n break\n # The following few lines get the dummy patterns from the database and saves them to the list of dummy-patterns\n pad1_host = DB.getRandomHostsByPatternLengthB(pad1_len, 1, block[0])[0]\n pad1_pattern = DB.getPatternForHost(pad1_host)\n pad1_pattern.remove(pad1_host)\n block[0].add(pad1_host)\n padding.append([pad1_host])\n for host in pad1_pattern:\n padding[i].append(host)\n pad2_host = DB.getRandomHostsByPatternLength(pad2_len, 1)[0]\n pad2_pattern = DB.getPatternForHost(pad2_host)\n pad2_pattern.remove(pad2_host)\n padding[i].append(pad2_host)\n for host in pad2_pattern:\n padding[i].append(host)\n # We now have as many dummy patterns as we will get. Start distributing them.\n pattern_copy = {}\n block[0].add(domain)\n pattern_copy[domain] = DB.getPatternForHost(domain)\n pattern_copy[domain].remove(domain)\n for element in DB.getRandomHostsByPatternLengthB(pattern_length, num_of_available_patterns, block[0]):\n # Get all patterns with the correct length and add them to the range query\n pattern_copy[element] = DB.getPatternForHost(element)\n pattern_copy[element].remove(element)\n block[0].add(element)\n for i in range(1, pattern_length, 1):\n # Distribute the remaining patterns (those whose lengths sum to the correct length)\n block.append(set())\n for host in pattern_copy:\n block[i].add(pattern_copy[host].pop())\n for pattern in padding:\n block[i].add(pattern[i])\n return block",
"def gen_malicious(num_per_dga=10000):\n domains = []\n labels = []\n\n # We use some arbitrary seeds to create domains with banjori\n banjori_seeds = ['somestring', 'firetruck', 'bulldozer', 'airplane', 'racecar',\n 'apartment', 'laptop', 'laptopcomp', 'malwareisbad', 'crazytrain',\n 'thepolice', 'fivemonkeys', 'hockey', 'football', 'baseball',\n 'basketball', 'trackandfield', 'fieldhockey', 'softball', 'redferrari',\n 'blackcheverolet', 'yellowelcamino', 'blueporsche', 'redfordf150',\n 'purplebmw330i', 'subarulegacy', 'hondacivic', 'toyotaprius',\n 'sidewalk', 'pavement', 'stopsign', 'trafficlight', 'turnlane',\n 'passinglane', 'trafficjam', 'airport', 'runway', 'baggageclaim',\n 'passengerjet', 'delta1008', 'american765', 'united8765', 'southwest3456',\n 'albuquerque', 'sanfrancisco', 'sandiego', 'losangeles', 'newyork',\n 'atlanta', 'portland', 'seattle', 'washingtondc']\n\n segs_size = max(1, num_per_dga/len(banjori_seeds))\n for banjori_seed in banjori_seeds:\n domains += banjori.generate_domains(segs_size, banjori_seed)\n labels += ['banjori']*segs_size\n\n domains += corebot.generate_domains(num_per_dga)\n labels += ['corebot']*num_per_dga\n\n # Create different length domains using cryptolocker\n crypto_lengths = range(8, 32)\n segs_size = max(1, num_per_dga/len(crypto_lengths))\n for crypto_length in crypto_lengths:\n domains += cryptolocker.generate_domains(segs_size,\n seed_num=random.randint(1, 1000000),\n length=crypto_length)\n labels += ['cryptolocker']*segs_size\n\n domains += dircrypt.generate_domains(num_per_dga)\n labels += ['dircrypt']*num_per_dga\n\n # generate kraken and divide between configs\n kraken_to_gen = max(1, num_per_dga/2)\n domains += kraken.generate_domains(kraken_to_gen, datetime(2016, 1, 1), 'a', 3)\n labels += ['kraken']*kraken_to_gen\n domains += kraken.generate_domains(kraken_to_gen, datetime(2016, 1, 1), 'b', 3)\n labels += ['kraken']*kraken_to_gen\n\n # generate locky and divide between configs\n locky_gen = max(1, num_per_dga/11)\n for i in range(1, 12):\n domains += lockyv2.generate_domains(locky_gen, config=i)\n labels += ['locky']*locky_gen\n\n # Generate pyskpa domains\n domains += pykspa.generate_domains(num_per_dga, datetime(2016, 1, 1))\n labels += ['pykspa']*num_per_dga\n\n # Generate qakbot\n domains += qakbot.generate_domains(num_per_dga, tlds=[])\n labels += ['qakbot']*num_per_dga\n\n # ramdo divided over different lengths\n ramdo_lengths = range(8, 32)\n segs_size = max(1, num_per_dga/len(ramdo_lengths))\n for rammdo_length in ramdo_lengths:\n domains += ramdo.generate_domains(segs_size,\n seed_num=random.randint(1, 1000000),\n length=rammdo_length)\n labels += ['ramdo']*segs_size\n\n # ramnit\n domains += ramnit.generate_domains(num_per_dga, 0x123abc12)\n labels += ['ramnit']*num_per_dga\n\n # simda\n simda_lengths = range(8, 32)\n segs_size = max(1, num_per_dga/len(simda_lengths))\n for simda_length in range(len(simda_lengths)):\n domains += simda.generate_domains(segs_size,\n length=simda_length,\n tld=None,\n base=random.randint(2, 2**32))\n labels += ['simda']*segs_size\n\n # matsnu\n domains += matsnu.generate_domains(num_per_dga, include_tld=False)\n labels += ['matsnu']*num_per_dga\n\n # suppobox\n domains += suppobox.generate_domains(num_per_dga, include_tld=False)\n labels += ['suppobox']*num_per_dga\n\n # gozi\n domains += gozi.generate_domains(num_per_dga, include_tld=False)\n labels += ['gozi']*num_per_dga\n\n return domains, labels",
"def __init__(self, var1):\n self.url = var1\n self.b = [ord(i) for i in var1]\n var2 = self.domain_head\n var3 = self.domain_head\n self.domain_tail = self.domain_head\n var4 = False\n var5 = False\n var6 = 0\n while var6 < len(self.b):\n if self.b[var6] == 46:\n var5 = True\n else:\n if self.b[var6] == 47:\n break\n if self.b[var6] == 58:\n if var6 + 2 < len(self.b) and self.b[var6 + 1] == 47 and self.b[var6 + 2] == 47:\n var6 = var6 + 2\n self.host_head = var6\n self.domain_head = var6\n var2 = var6\n var3 = var6\n self.domain_tail = var6\n var6 = var6 + 1\n continue\n if not var4:\n var5 = True\n var4 = True\n if var5:\n var2 = self.domain_head\n self.domain_head = var3\n var3 = self.domain_tail\n self.domain_tail = var6\n var5 = False\n var6 = var6 + 1\n self.host_tail = var6\n if not var4:\n var2 = self.domain_head\n self.domain_head = var3\n var3 = self.domain_tail\n self.domain_tail = var6\n if self.in_second_domain_set(self.b, var3 - self.domain_head - 1, self.domain_head + 1) > 0 and self.in_top_domain_set(self.b, self.domain_tail - var3 - 1, var3 + 1) == 0:\n self.domain_head = var2\n self.domain_head = self.domain_head + 1\n self.host_head = self.host_head + 1",
"def create_challenge():\n\treturn os.urandom(12)",
"def random_ip():\n return new_ip(\"%i.%i.%i.%i\" % (randint(1, 254), # nosec\n randint(1, 254), # nosec\n randint(1, 254), # nosec\n randint(1, 254))) # nosec",
"def domain(self, domain):",
"def argsBuilder(target_dom:string, server_ip: string, domain_ip:string, server_dom_ip:string, ti:float, d:int, packets:int, n_bot:int):\n tf = ti + d #End time of the attack\n new_packets_args = []\n if n_bot == 1: #If dos attack\n ips = randomIP(n_bot, Time.time(), False)\n else: #If ddos attack\n ips = randomIP(n_bot, Time.time(), True)\n ips = randomIP(n_bot, Time.time(), n_bot) #Array with source ip\n ports = randomSourcePorts(n_bot, Time.time()) #Array with source ports\n time = genInter(Time.time(), ti, tf, packets * n_bot) #Arrival time of the requests\n for t in time:\n n = random.randint(0, n_bot - 1)\n dt = abs(random.gauss(0.0001868, 0.0000297912738902)) #Delay time for the response\n while(dt == 0): #Delay time can't be 0\n dt = abs(random.gauss(0.0001868, 0.0000297912738902))\n args = [target_dom, ips[n], server_ip, ports[n], t, Time.time(), domain_ip, server_dom_ip, dt]\n new_packets_args.append(args)\n return new_packets_args",
"def get_random_ip():\n return \".\".join(str(random.randrange(1, 255)) for i in range(4))",
"def generate_url(domainname = None):\n path_length = random.choice([1,2,3,4,5])\n path = ''\n for i in range(path_length):\n path = path + '/' + ''.join(generate_string(5, valid_domain_name_chars))\n if domainname:\n return 'http://www.'+domainname+path\n else: \n return 'http://www.'+generate_domainname()+path",
"def test_url_subdomain(self):\n subdomains = ct.url_subdomain(\"https://www.bad-actor.services/some/url-thats-long?debug=True\")\n assert isinstance(subdomains, list)\n assert len(subdomains) == 1\n subdomains = ct.url_subdomain(\"https://one.two.bad-actor.services/some/url-thats-long?debug=True\")\n assert subdomains[0] == \"one\"\n assert subdomains[1] == \"two\"",
"def random_zone(name='pooey', tld='com'):\n chars = \"\".join(random.choice(string.ascii_letters) for _ in range(8))\n return '{0}-{1}.{2}.'.format(name, chars, tld)",
"def test_generateRandom(self):\n\n # commented as of now as its failing randomly. Race due to\n # monkey patching ???\n # self.assertEqual(len(self.urlShortener.generateShortUrl()), 6)\n # self.assertEqual(len(self.urlShortener.generateShortUrl(7)), 7)\n\n self.assertEqual(self.urlShortener.generateShortUrl().isalnum(), True)",
"def generateRandomIPv4():\n return \".\".join(map(str, (random.randint(0, 255) for _ in range(4))))",
"def test_generate_and_send(mock_sr):\n # mock send packets\n mock_sr.return_value = fake_sr_return()\n\n # init generator\n netprobify = NetProbify()\n netprobify.instantiate_generator()\n\n # generate packets\n TARGET.generate_packets(GROUP, netprobify.id_gen)\n assert len(TARGET.packets) == 10\n assert TARGET.packets[0].dst == \"127.0.0.1\"\n assert TARGET.packets[0].sport == 65000\n\n # check number of packets\n assert len(TARGET.packets) == 10\n\n # check if the sport are rotated in the range\n n = 0\n for pkt in TARGET.packets:\n port = n % 2 + 65000\n n += 1\n assert pkt[UDP].sport == port\n assert pkt.id == n\n\n # subnet test\n UDPunreachable(\n \"localhost\",\n active=True,\n description=\"localhost\",\n destination=\"127.0.0.0/30\",\n config_destination=\"127.0.0.0/30\",\n address_family=\"ipv4\",\n dont_fragment=True,\n is_subnet=True,\n nb_packets=1,\n interval=0,\n timeout=1,\n dst_port=0,\n ip_payload_size=0,\n threshold=1,\n state=\"in production\",\n alert_level=\"paging\",\n is_dynamic=False,\n dns_update_interval=0,\n groups={\"test\"},\n lifetime={\"days\": \"1\"},\n creation_date=None,\n )\n\n TARGET.generate_packets(GROUP, netprobify.id_gen)\n ip_addresses = [\"127.0.0.0\", \"127.0.0.1\", \"127.0.0.2\", \"127.0.0.3\"]\n for pkt in TARGET.packets:\n n += 1\n assert pkt.dst in ip_addresses\n assert pkt.id == n\n\n # fake packets sending\n result = []\n TARGET.send_packets(result, \"WARNING\", GROUP)\n\n assert result == [\n {\n 65000: {\"sent\": 1, \"loss\": 1, \"timestamp_ooo\": 0, \"latency\": []},\n 65001: {\"sent\": 1, \"loss\": 0, \"timestamp_ooo\": 0, \"latency\": [0.1]},\n \"name\": \"localhost\",\n \"probing_type\": \"UDPunreachable\",\n \"groups\": {\"test\"},\n \"destination\": \"127.0.0.1\",\n \"address_family\": \"ipv4\",\n \"state\": \"in production\",\n \"alert_level\": \"paging\",\n \"ip_payload_size\": 8,\n \"port_mismatch\": 0,\n }\n ]",
"def tubeid():\n return binascii.hexlify(os.urandom(12))",
"def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'",
"def genIp():\n ip = \".\".join(str(random.randint(0, 255)) for _ in range(4))\n return ip",
"def get_post_data(self, random_str):\n return {\n 'root_domain': '{0}.{0}.mozilla.com'.format(\n random_label() + random_str),\n 'soa_primary': 'ns1.mozilla.com',\n 'soa_contact': 'noc.mozilla.com',\n 'nameserver_1': 'ns1.mozilla.com',\n 'nameserver_2': 'ns2.mozilla.com',\n 'nameserver_3': 'ns3.mozilla.com',\n 'ttl_1': random_byte(),\n 'ttl_2': random_byte(),\n 'ttl_3': random_byte(),\n }",
"def spoof_packet(packet):",
"def gen_random_fightID():\n pass",
"def generateBaseDRQ(self, domain):\n if not DB.isValidTarget(domain):\n Error.printErrorAndExit(domain + \" is not a valid target\")\n patlen = DB.getPatternLengthForHost(domain)\n block = [set()]\n pattern = DB.getPatternForHost(domain) # Get the actual pattern of the target\n randoms = DB.getRandomHosts((Config.RQSIZE-1)*len(pattern)) # Get random hosts (dummies)\n pattern.remove(domain)\n block[0].add(domain)\n i = 1\n for subquery in pattern: # Create the blocks that will hold dummies and actual queries\n block.append(set())\n block[i].add(subquery) # Add the actual query to its respective block\n i += 1\n for query, index in zip(randoms, cycle(range(patlen))): \n # distribute the randomly chosen dummy queries as evenly as possible across the blocks\n block[index].add(query)\n return block",
"def main() -> None:\n\n # region Init Raw-packet classes\n base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])\n utils: Utils = Utils()\n # endregion\n\n # region Variables\n fake_domains: List[str] = list()\n no_such_domains: List[str] = list()\n fake_ipv4_addresses: List[str] = list()\n fake_ipv6_addresses: List[str] = list()\n # endregion\n\n # region Parse script arguments\n parser: ArgumentParser = ArgumentParser(description=base.get_banner(__script_name__),\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('-i', '--interface', help='Set interface name for send DNS reply packets', default=None)\n parser.add_argument('-p', '--port', type=int,\n help='Set UDP port for listen DNS request packets (default: 53)', default=53)\n parser.add_argument('-t', '--target_mac', help='Set target MAC address', default=None)\n parser.add_argument('--T4', help='Set target IPv4 address', default=None)\n parser.add_argument('--T6', help='Set target IPv6 address', default=None)\n parser.add_argument('-c', '--config_file',\n help='Set json config file name, example: --config_file \"dns_server_config.json\"',\n default=None)\n parser.add_argument('--fake_domains',\n help='Set fake domain regexp or domains, example: --fake_domains \".*apple.com,.*google.com\"',\n default=None)\n parser.add_argument('--no_such_domains', help='Set no such domain or domains, ' +\n 'example: --no_such_domains \"apple.com,google.com\"', default=None)\n parser.add_argument('--fake_ipv4',\n help='Set fake IP address or addresses, example: --fake_ipv4 \"192.168.0.1,192.168.0.2\"',\n default=None)\n parser.add_argument('--fake_ipv6',\n help='Set fake IPv6 address or addresses, example: --fake_ipv6 \"fd00::1,fd00::2\"',\n default=None)\n parser.add_argument('--ipv6', action='store_true', help='Enable IPv6')\n parser.add_argument('--disable_ipv4', action='store_true', help='Disable IPv4')\n parser.add_argument('--log_file_name', type=str,\n help='Set file name for save DNS queries (default: \"dns_server_log\")',\n default='dns_server_log')\n parser.add_argument('--log_file_format', type=str,\n help='Set file format for save results: csv, xml, json, txt (default: \"json\")',\n default='json')\n parser.add_argument('-f', '--fake_answer', action='store_true',\n help='Set your IPv4 or IPv6 address in all answers')\n parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')\n args = parser.parse_args()\n # endregion\n\n # region Print banner\n if not args.quiet:\n base.print_banner(__script_name__)\n # endregion\n\n try:\n\n # region Get listen network interface, your IP and MAC address, first and last IP in local network\n current_network_interface: str = \\\n base.network_interface_selection(interface_name=args.interface,\n message='Please select a network interface for ' +\n __script_name__ + ' from table: ')\n current_network_interface_settings: Dict[str, Union[None, str, List[str]]] = \\\n base.get_interface_settings(interface_name=current_network_interface,\n required_parameters=['mac-address',\n 'ipv4-address'])\n if current_network_interface_settings['ipv6-link-address'] is None:\n current_network_interface_settings['ipv6-link-address'] = \\\n base.make_ipv6_link_address(current_network_interface_settings['mac-address'])\n # endregion\n\n # region General output\n base.print_info('Network interface: ', current_network_interface_settings['network-interface'])\n base.print_info('Your IPv4 address: ', current_network_interface_settings['ipv4-address'])\n 
base.print_info('Your IPv6 address: ', current_network_interface_settings['ipv6-link-address'])\n base.print_info('Your MAC address: ', current_network_interface_settings['mac-address'])\n # endregion\n\n # region Create fake domains list\n if args.fake_domains is not None:\n _fake_domains: str = sub(r' +', '', args.fake_domains)\n for domain_name in _fake_domains.split(','):\n fake_domains.append(domain_name)\n # endregion\n\n # region Create no such name list\n if args.no_such_domains is not None:\n _no_such_domains: str = sub(r' +', '', args.no_such_domains)\n for no_such_name in _no_such_domains.split(','):\n no_such_domains.append(no_such_name)\n # endregion\n\n # region Create fake ipv4 addresses list\n if args.fake_ipv4 is not None:\n _fake_ipv4: str = sub(r' +', '', args.fake_ipv4)\n for _ipv4_address in _fake_ipv4.split(','):\n fake_ipv4_addresses.append(utils.check_ipv4_address(network_interface=current_network_interface,\n ipv4_address=_ipv4_address,\n is_local_ipv4_address=False,\n parameter_name='fake IPv4 address'))\n # endregion\n\n # region Create fake ipv6 addresses list\n if args.fake_ipv6 is not None:\n _fake_ipv6: str = sub(r' +', '', args.fake_ipv6)\n for _ipv6_address in _fake_ipv6.split(','):\n fake_ipv6_addresses.append(utils.check_ipv6_address(network_interface=current_network_interface,\n ipv6_address=_ipv6_address,\n is_local_ipv6_address=False,\n parameter_name='fake IPv6 address',\n check_your_ipv6_address=False))\n # endregion\n\n # region Start DNS server\n dns_server: DnsServer = DnsServer(network_interface=current_network_interface)\n dns_server.start(listen_port=args.port,\n target_mac_address=args.target_mac,\n target_ipv4_address=args.T4,\n target_ipv6_address=args.T6,\n fake_answers=args.fake_answer,\n fake_ipv4_addresses=fake_ipv4_addresses,\n fake_ipv6_addresses=fake_ipv6_addresses,\n fake_domains_regexp=fake_domains,\n no_such_domains=no_such_domains,\n listen_ipv6=args.ipv6,\n disable_ipv4=args.disable_ipv4,\n config_file=args.config_file,\n log_file_name=args.log_file_name,\n log_file_format=args.log_file_format)\n # endregion\n\n except KeyboardInterrupt:\n base.print_info('Exit')\n exit(0)\n\n except AssertionError as Error:\n base.print_error(Error.args[0])\n exit(1)",
"def short_url_gen(stringLength=5):\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def random_ip(pattern=None):\n if pattern is None:\n pattern = '*.*.*.*'\n num_asterisks = 0\n for c in pattern:\n if c == '*':\n num_asterisks += 1\n rand_list = [random.randint(1, 255) for i in range(0, num_asterisks)]\n for item in rand_list:\n pattern = pattern.replace('*', str(item), 1)\n return pattern",
"def random_invite_hash():\n return ''.join(random.choice(string.ascii_lowercase) for i in range(25))",
"def regularResponse(p, dom: string, ip_dom: string, ip_srv: string, dt: float):\n id_IP = int(RandShort()) #id for IP layer\n ar_ans = DNSRR(rrname = dom, rdata = ip_dom) #Domain answer\n ar_ext = DNSRROPT(rclass=4096) #Extension\n an_ans = DNSRR(rrname = dom, rdata = ip_srv) #Domain server answer\n ns_ans = DNSRR(rrname = dom, type = 2, rdata = dom) #Name server answer\n ans = Ether(dst= '18:66:da:e6:36:56', src= '18:66:da:4d:c0:08')/IP(dst = p[IP].src, src = p[IP].dst, id = id_IP)/UDP(dport = p[UDP].sport, sport = p[UDP].dport)/DNS(id = p[DNS].id, qr = 1, rd = 0, cd = 1, qd = p[DNS].qd, ns = ns_ans, an = an_ans,ar= ar_ans/ar_ext)\n ans.time = p.time + dt #Set arrival time\n return ans",
"def resolve(self,\n ns_servers: List[Dict[str, str]] = [{'IPv4 address': '8.8.8.8', 'MAC address': '01:23:45:67:89:0a'}],\n domain: str = 'google.com',\n subdomains_list: List[str] = ['www', 'mail', 'ns', 'test'],\n subdomains_file: Union[None, str] = None,\n subdomains_brute: bool = False,\n max_threats_count: int = 10,\n udp_destination_port: int = 53,\n timeout: int = 30) -> List[Dict[str, str]]:\n\n try:\n\n # region Clear results list\n self.index_of_dns_query = 0\n self.results.clear()\n self.uniq_hosts.clear()\n # endregion\n\n # region Set target domain\n assert not (domain == ''), \\\n 'Target domain is empty, please set target domain in this parameter: ' + self.base.info_text('domain')\n self.domain = domain\n # endregion\n\n # region Subdomains list\n if len(subdomains_list) > 0:\n self.subdomains = subdomains_list\n # endregion\n\n # region Subdomains file\n if subdomains_file is not None:\n assert isfile(subdomains_file), \\\n 'File with subdomain list:' + self.base.error_text(subdomains_file) + ' not found!'\n with open(subdomains_file) as subdomains_file_descriptor:\n for subdomain in subdomains_file_descriptor.read().splitlines():\n self.subdomains.append(subdomain)\n # endregion\n\n # region Subdomains brute\n if subdomains_brute:\n\n if not self.quiet:\n self.base.print_info('Make subdomains list for brute .... ')\n\n for character1 in RawDnsResolver.available_characters:\n self.subdomains.append(character1)\n for character2 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2)\n for character3 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2 + character3)\n # endregion\n\n # region Check length of subdomains list\n assert len(self.subdomains) != 0, \\\n 'List containing subdomains is empty, please set any of this parameters: ' \\\n + self.base.info_text('subdomain_list') + ' or ' \\\n + self.base.info_text('subdomain_file') + ' or ' \\\n + self.base.info_text('subdomain_brute')\n # endregion\n\n # region Create raw socket\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n # endregion\n\n # region Truncate temporary results file\n temporary_results_file = open(RawDnsResolver.temporary_results_filename, 'r+')\n temporary_results_file.truncate()\n temporary_results_file.close()\n # endregion\n\n # region Sniff DNS answers\n if not self.quiet:\n self.base.print_info('Start DNS answers sniffer for domain: ', self.domain)\n\n threats: ThreadManager = ThreadManager(max_threats_count)\n self._sniff_start(self.your_mac_address, self.your_ipv4_address,\n self.your_ipv6_address, udp_destination_port)\n threats.add_task(self._sniff_check)\n # endregion\n\n # region Send DNS queries\n if not self.quiet:\n self.base.print_info('Start sending DNS queries, time: ', str(datetime.now()))\n\n self._send_queries(send_socket=raw_socket,\n source_mac_address=self.your_mac_address,\n source_ipv4_address=self.your_ipv4_address,\n source_ipv6_address=self.your_ipv6_address,\n domain=domain,\n ns_servers=ns_servers,\n destination_port=udp_destination_port,\n max_threats_count=int(max_threats_count) - 1,\n subdomains=self.subdomains)\n # endregion\n\n # region Timeout\n if not self.quiet:\n self.base.print_info('Wait timeout: ', str(timeout) + ' sec')\n sleep(timeout)\n # endregion\n\n # region Return results\n self._sniff_stop()\n if not self.quiet:\n if len(self.results) > 0:\n self.base.print_success('Found ', str(len(self.results)),\n ' subdomains and addresses for 
domain: ', self.domain)\n else:\n self.base.print_error('Not found subdomains in domain: ', self.domain)\n return self.results\n # endregion\n\n except AssertionError as Error:\n self.base.print_error(Error.args[0])\n exit(1)",
"def randomIP():\n\tip = \".\".join(map(str, (random.randint(0,255)for _ in range(4))))\n\treturn ip",
"def random_url():\r\n url = u\"http://{0}.com\".format(random_string())\r\n return url"
] | [
"0.5977173",
"0.59681684",
"0.57653284",
"0.57206655",
"0.5675084",
"0.5635172",
"0.54800284",
"0.54657984",
"0.5421064",
"0.53999",
"0.53358173",
"0.532985",
"0.53167856",
"0.5308277",
"0.52976847",
"0.5266126",
"0.5246301",
"0.52280766",
"0.52115506",
"0.5182972",
"0.516677",
"0.51526916",
"0.5141414",
"0.5130938",
"0.5097179",
"0.5081767",
"0.50812876",
"0.50559926",
"0.5053407",
"0.5052928"
] | 0.71283317 | 0 |
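randomSubBuilder above relies on a randomSub(seed) helper that is not included in the row. A hypothetical stand-in, assuming the helper only derives a pseudo-random lowercase/digit label from the seed, followed by the same qname composition used in the builder:

import random
import string

def random_subdomain(seed, length=12):
    # Hypothetical stand-in for the randomSub(seed) helper used above.
    rng = random.Random(seed)
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))

if __name__ == "__main__":
    sub = random_subdomain(0.123)
    q_name = sub + '.' + 'example.com'  # same composition as in randomSubBuilder
    print(q_name)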
Gives an array of arguments to create packets | def argsBuilder(target_dom:string, server_ip: string, domain_ip:string, server_dom_ip:string, ti:float, d:int, packets:int, n_bot:int):
tf = ti + d #End time of the attack
new_packets_args = []
if n_bot == 1: #If dos attack
ips = randomIP(n_bot, Time.time(), False)
else: #If ddos attack
ips = randomIP(n_bot, Time.time(), True)
ports = randomSourcePorts(n_bot, Time.time()) #Array with source ports
time = genInter(Time.time(), ti, tf, packets * n_bot) #Arrival time of the requests
for t in time:
n = random.randint(0, n_bot - 1)
dt = abs(random.gauss(0.0001868, 0.0000297912738902)) #Delay time for the response
while(dt == 0): #Delay time can't be 0
dt = abs(random.gauss(0.0001868, 0.0000297912738902))
args = [target_dom, ips[n], server_ip, ports[n], t, Time.time(), domain_ip, server_dom_ip, dt]
new_packets_args.append(args)
return new_packets_args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_args(port, n, t, population, test=None, value=0, failure=None, tx_rate=0, loglevel=logging.INFO, output=None,\n broadcast=True, fan_out=10, profile=None, validate=False, ignore_promoter=False):\n res = [str(port), str(n), str(t), str(population)]\n\n if test is not None:\n res.append('--test')\n res.append(test)\n\n res.append('--value')\n res.append(str(value))\n\n if failure is not None:\n res.append('--failure')\n res.append(failure)\n\n res.append('--tx-rate')\n res.append(str(tx_rate))\n\n if loglevel == logging.DEBUG:\n res.append('--debug')\n elif loglevel == logging.INFO:\n res.append('-v')\n\n # None represents stdout\n if output is not None:\n res.append('-o')\n res.append(output)\n\n if broadcast:\n res.append('--broadcast')\n\n res.append('--fan-out')\n res.append(str(fan_out))\n\n if profile:\n res.append('--profile')\n res.append(profile)\n\n if validate:\n res.append('--validate')\n\n if ignore_promoter:\n res.append('--ignore-promoter')\n\n return res",
"def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args",
"def pack_op_args(inputs, outputs, attrs):\n op_args = (inputs, outputs, attrs)\n return [item for arg in op_args for item in arg]",
"def Args(parser):",
"async def prepare_args(self, nodes):\n args = list(cfg['tools.masscan.args'])\n\n rate = self.scan_rate()\n\n args.extend(['--rate', rate])\n\n include_ports = NmapTool.list_to_ports_string(tcp=self.tcp and cfg['portdetection.tcp.ports.include'],\n udp=self.udp and cfg['portdetection.udp.ports.include'])\n\n exclude_ports = NmapTool.list_to_ports_string(tcp=self.tcp and cfg['portdetection.tcp.ports.exclude'],\n udp=self.udp and cfg['portdetection.udp.ports.exclude'])\n\n if not include_ports:\n raise StopCommandException(\"No ports for scan\")\n args.extend(['--ports', include_ports])\n\n if exclude_ports:\n args.extend(['--exclude-ports', exclude_ports])\n\n args.extend([str(node.ip) for node in nodes])\n\n return args",
"def create_usdzconvert_arguments(args: list) -> list:\n usdz_converter_path = current_app.config.get('USDZ_CONVERTER_PATH') / \\\n current_app.config.get('USDZ_CONVERTER_SCRIPT_PATH')\n\n arguments = [_get_converter_interpreter_arg(),\n usdz_converter_path.resolve().as_posix()]\n\n for arg in args:\n arguments.append(arg)\n\n return arguments",
"def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray",
"def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray",
"def parse_arguments(args):",
"def genPackets(l: list):\n check(len(l), lambda x: x== 9, \"Wrong number of given arguments for genPackets(l), must be 9\")\n req = randomSubBuilder(l[0], l[1], l[2], l[3], l[4], l[5])\n res = regularResponse(req, l[0], l[6], l[7], l[8])\n return [req, res]",
"def pack(*args):\n result = np.empty(len(args), dtype=object)\n for i, arg in enumerate(args):\n result[i] = arg\n return result",
"def generateArgsList(self, I1, I2, O1, O2, O3, N, M, S, C ):\n ArgsList = [ \n \"-n\", str(N), \n # \"-m\", str(M), \n # \"-s\", str(S), \n \"-c\", str(C), \n ]\n if I1 > 0 or I2 > 0:\n if I1 > 0:\n ArgsList.append(\"-i1\")\n ArgsList.append(str(I1)) \n if I2 > 0:\n ArgsList.append(\"-i2\")\n ArgsList.append(str(I2))\n else: \n ArgsList.append(\"--noinput\")\n \n if O1 > 0 or O2 > 0 or O3 > 0:\n if O1 > 0:\n ArgsList.append(\"-o1\")\n ArgsList.append(str(O1)) \n if O2 > 0:\n ArgsList.append(\"-o2\")\n ArgsList.append(str(O2))\n if O3 > 0:\n ArgsList.append(\"-o3\")\n ArgsList.append(str(O3))\n else: \n ArgsList.append(\"--nooutput\")\n \n ArgsList.append(\"--nosummary\")\n ArgsList.append(\"--verbose\")\n return ArgsList",
"def create(*args):",
"def create_args():\n return {\n \"team_id\": fields.UUID(missing=None),\n \"file_ids\": fields.List(fields.UUID(), load_from=\"image_ids\", location=\"json\"),\n }",
"def _create_arguments(self, args):\n assert isinstance(args, (list, tuple))\n\n arguments = []\n index = 0\n for arg in args:\n assert isinstance(arg, (list, tuple))\n assert len(arg) == 2 or len(arg) == 3\n\n identifier = arg[0]\n if isinstance(arg[1], str):\n idl_type = self._create_type(\n arg[1], is_optional=(len(arg) == 3))\n else:\n idl_type = arg[1]\n\n default_value = None\n if len(arg) == 3:\n default_value = self._create_literal_constant(arg[2])\n\n arguments.append(\n Argument.IR(\n identifier,\n index=index,\n idl_type=idl_type,\n default_value=default_value))\n\n index += 1\n\n return arguments",
"def __init__(self, nums):\n self.args = nums",
"def _RegisterInputs(self):\n data = []\n data.append(self._Arg(\n self._message_type, self._source, self._cn0_field))\n data.append(self._Arg(\n self._message_type, self._source, self._num_field))\n data.append(self._Arg(\n self._message_type, self._source, self._type_field))\n return data",
"def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list",
"def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data",
"def prepare_args(self):\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip),\n self.service, ])\n return args",
"def networkClassCreator(className,*args):\n\n print 'In networkClassCreator: ',args\n return WorldManipulationEvent(['create',className,args])",
"def create_argument_list(self):\n raise NotImplementedError",
"def add_arguments(self, parser):",
"def _parse_create_args(self, args):\r\n size = args['--size']\r\n location = args['--datacenter']\r\n return int(size), str(location)",
"def create_arguments(parser):\n parser.add_argument(\"-f\", \"--list_file\", help=\"file to print the list to\")\n parser.add_argument(\"-p\", \"--path\", help=\"path to the files\")",
"def Args(parser):\n flags.AddRegion(parser)\n flags.AddCluster(parser)",
"def __prepare_args(self, args):\n ret = []\n for a in args:\n if isinstance(a, bytes):\n if self.__size_expr.match(a):\n ret += [a]\n else:\n ret += [b'\"' + a + b'\"']\n continue\n ret += [bytes(str(a).encode(\"utf-8\"))]\n return ret",
"def _gen_cmd(cmd, address):\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args",
"def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args",
"def getPositionalArgs():"
] | [
"0.6156209",
"0.5899861",
"0.5849596",
"0.58321226",
"0.5789751",
"0.5764216",
"0.57535285",
"0.57535285",
"0.57303244",
"0.5728946",
"0.5726997",
"0.5719696",
"0.56849927",
"0.5654152",
"0.5565254",
"0.55376214",
"0.55330455",
"0.5521985",
"0.5521701",
"0.5485274",
"0.54265696",
"0.54237473",
"0.5402536",
"0.539932",
"0.53956383",
"0.5395314",
"0.539319",
"0.53690976",
"0.5351384",
"0.5347514"
] | 0.62451375 | 0 |
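argsBuilder above leans on genInter (not shown in the row) for packet arrival times and on a rejection loop for the Gaussian response delay. A sketch under the assumption that genInter simply returns sorted arrival times inside [ti, tf); the delay loop mirrors the one in the entry:

import random

def gen_inter(seed, ti, tf, n_packets):
    # Hypothetical stand-in for genInter: n_packets arrival times inside [ti, tf), sorted.
    rng = random.Random(seed)
    return sorted(rng.uniform(ti, tf) for _ in range(n_packets))

def positive_gauss_delay(mu=0.0001868, sigma=0.0000297912738902):
    # Same rejection loop as in argsBuilder: redraw until the delay is non-zero.
    dt = abs(random.gauss(mu, sigma))
    while dt == 0:
        dt = abs(random.gauss(mu, sigma))
    return dt

if __name__ == "__main__":
    print(gen_inter(42, 0.0, 10.0, 5))
    print(positive_gauss_delay())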
Start the stopwatch if it is not running; stop it if it is running. | def start_stop( self ):
if self.stop_event.is_set():
# Stopwatch was stopped, so start it.
self.stop_event.clear()
self.timer_thread = Thread( target=self.run_stopwatch, args=( time(), ) )
self.timer_thread.start()
else:
# Stopwatch was running, so stop it.
self.stop_event.set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_stopwatch( self, start_time ):\r\n self.start_time = start_time\r\n while not self.stop_event.is_set():\r\n sleep( 0.01 ) # Accurate to about 1/100th of a second.\r\n self.gui.time_label.setText( \"{:.2f}\".format( time() - self.start_time ) )",
"def start(self):\n try:\n self.sec_start = time.clock()\n self.sec_stop = None\n self.sec_elapse = None\n self.duration_pause = 0\n\n # Will stop pausing if it's still pausing\n if self.timer_pause is not None \\\n and self.timer_pause.is_on():\n self.timer_pause.stop()\n\n except:\n raise",
"def start(self):\n\t\tif self.__start_time is not None:\n\t\t\traise TimerError(f\"Timer is running. Use .stop() to stop the timer.\")\n\n\t\tself.__start_time = time.perf_counter()",
"def start(self):\n if self._start_time is not None:\n raise TimerError(\"Timer is running. Use stop() to stop it\")\n\n self._start_time = time.perf_counter()",
"def start(self):\n \n if not self.is_running:\n self._timer = threading.Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True",
"def test_issue_start_stop_watch(self):\n pass",
"def timer_startIfNeeded():\n nonlocal b_timerStart\n for k, v in kwargs.items():\n if k == 'timerStart': b_timerStart = bool(v)\n if b_timerStart:\n other.tic()",
"def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()",
"def stop(self):\n# if self._start_time is None:\n elapsed_time = time.perf_counter() - self._start_time\n self._start_time = None",
"def start_timer(self):\n self.start_time = time.time()",
"def stop(self):\n self._schedule(0, 0)\n self._started = False",
"def test_issue_stop_stop_watch(self):\n pass",
"def reset_and_stop(self):\n self.enabled = False\n self.start_time = None",
"def StartTimer(self):\n self._start_time = time.time()",
"def _start_stop(self):\n if not self.running:\n #get configuration from gui\n self._start_session()\n else:\n self._stop_session()",
"def start_timer(self):\n self.start_time = datetime.now()",
"def start_stop(self, event):\n self.start_button.SetLabel('Measuring')\n self.start_button.Enable = False\n # Do nothing as of now. Will call measuring functions later.\n self.txt_info_box.SetLabel('Starting measurement.')\n time.sleep(2)\n self.start_button.SetLabel('Start measurement')\n self.start_button.Enable = True\n self.txt_info_box.SetLabel('Completed measurement.')\n self.result_box.SetLabel(\"100.00\")",
"def stop(self):\n\t\tif self.__start_time is None:\n\t\t\traise TimerError(f\"Timer is not running, so it can't be stopped. Use .start to start the timer.\")\n\n\t\telapsed_time = time.perf_counter() - self.__start_time\n\n\t\tself.__start_time = None",
"def start(self):\n\n if not self.profiler_enabled_time_str:\n return\n\n last_end_time = -1\n for time_str in self.profiler_enabled_time_str.split(','):\n begin_time = int(time_str.split(':')[0].strip())\n end_time_str = time_str.split(':')[1].strip() if ':' in time_str else None\n end_time = int(end_time_str) if end_time_str else 365 * 24 * 60 * 60\n if begin_time <= last_end_time:\n raise ValueError('begin_time {} is no larger than the last '\n 'end_time {}'.format(begin_time, last_end_time))\n if end_time <= begin_time:\n raise ValueError('end_time {} is no larger than begin_time {}'.format(\n end_time, begin_time))\n # 4th positional arg added to support Python2 for the short-term.\n self.scheduler.enter(begin_time, 1, _start_profiler,\n argument=(self.output_dir,))\n self.scheduler.enter(end_time, 1, _stop_profiler, ()) # pylint: disable=no-value-for-parameter\n last_end_time = end_time\n\n threading.Thread(target=self.scheduler.run).start()",
"def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()",
"def stopif(self, stop):\n if stop:\n self._stopsim = True",
"def stop_running(self):\n self.running = False",
"def reset_stop_timer(self) -> None: \r\n self.stop_timer = 0",
"def start_stop(now, start, stop, temporary_user, config, tz):\n if now.time() >= start and now.time() < stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'stopped', now, tz)\n action_on_instances(temporary_user.start_instances, action_required_ids, 'Start')\n elif now.time() >= stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'running', now, tz)\n action_on_instances(temporary_user.stop_instances, action_required_ids, 'Stop')",
"def start_clock(self):\n pass",
"def run(self):\n if self.running:\n raise RuntimeError(\"Already running!\")\n try:\n self.running = True\n self.stopping = False\n while not self.stopping:\n self.prepare_timers()\n if self.debug_blocking:\n self.block_detect_pre()\n self.fire_timers(self.clock())\n if self.debug_blocking:\n self.block_detect_post()\n self.prepare_timers()\n wakeup_when = self.sleep_until()\n if wakeup_when is None:\n sleep_time = self.default_sleep()\n else:\n sleep_time = wakeup_when - self.clock()\n if sleep_time > 0:\n self.wait(sleep_time)\n else:\n self.wait(0)\n else:\n self.canceled_timers = 0\n del self.timers[:]\n del self.next_timers[:]\n finally:\n self.running = False\n self.stopping = False",
"def stop(self):\r\n self.stopped = True\r\n time.sleep(1)",
"def start(self):\n self.stop_recognising.clear()\n self.thread.start()",
"def stop(self):\n with self._lock:\n self._running.clear()\n if self._timer:\n self._timer.cancel()\n self._timer = None",
"def stop_timing_no_callback(self) -> None:\n self._is_timing = False"
] | [
"0.7138144",
"0.7008591",
"0.67390275",
"0.66714877",
"0.66500926",
"0.6405324",
"0.6306759",
"0.626701",
"0.619082",
"0.6111843",
"0.60280937",
"0.60244936",
"0.59438825",
"0.59425366",
"0.5927466",
"0.59179336",
"0.5896777",
"0.5890608",
"0.58734024",
"0.5858474",
"0.58576584",
"0.5822657",
"0.5794608",
"0.57657695",
"0.5765451",
"0.57470876",
"0.5736225",
"0.5719257",
"0.57148075",
"0.5714738"
] | 0.8451777 | 0 |
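The start_stop/run_stopwatch pair above toggles a background thread with a threading.Event and writes the elapsed time to a GUI label. A console-only sketch of the same toggle, with print() standing in for the label and a join() added for a clean shutdown (the original does not join):

from threading import Event, Thread
from time import sleep, time

class Stopwatch:
    """Console sketch of the toggle above; print() stands in for the GUI label."""

    def __init__(self):
        self.stop_event = Event()
        self.stop_event.set()  # begin in the stopped state
        self.timer_thread = None

    def _run(self, start_time):
        while not self.stop_event.is_set():
            sleep(0.01)  # roughly 1/100th-of-a-second resolution, as above
            print("{:.2f}".format(time() - start_time), end="\r", flush=True)

    def start_stop(self):
        if self.stop_event.is_set():
            # Stopwatch was stopped, so start it.
            self.stop_event.clear()
            self.timer_thread = Thread(target=self._run, args=(time(),))
            self.timer_thread.start()
        else:
            # Stopwatch was running, so stop it (join added here for a clean exit).
            self.stop_event.set()
            self.timer_thread.join()

if __name__ == "__main__":
    sw = Stopwatch()
    sw.start_stop()  # start
    sleep(0.3)
    sw.start_stop()  # stop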
Runs a stopwatch loop showing the time elapsed at regular intervals. | def run_stopwatch( self, start_time ):
self.start_time = start_time
while not self.stop_event.is_set():
sleep( 0.01 ) # Accurate to about 1/100th of a second.
self.gui.time_label.setText( "{:.2f}".format( time() - self.start_time ) ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def timer():\n start = time.time()\n\n yield\n\n end = time.time()\n\n print('Elapsed: {:.2f}s'.format(end - start))",
"def run(self):\n last_time = time.time()\n while self.running:\n now_time = time.time()\n interval = now_time - last_time\n last_time = now_time\n self.update(interval)\n time.sleep(Options['update interval'])",
"def timer():\n start = time.time()\n # Send control back to the context block\n yield\n end = time.time()\n print('Elapsed: {:.2f}s'.format(end - start))",
"def run(self):\n while not self.done:\n time_delta = self.clock.tick(self.fps)\n self.event_loop()\n self.update(time_delta)\n pg.display.update()\n if self.show_fps:\n fps = self.clock.get_fps()\n with_fps = \"{} - {:.2f} FPS\".format(self.caption, fps)\n pg.display.set_caption(with_fps)",
"def perf_timer():\n start_time = datetime.now()\n yield\n end_time = datetime.now()\n log.info(end_time - start_time)",
"def timer():\n start = time.time()\n # Send control back to the context block\n yield timer()\n end = time.time()\n print('Elapsed: {:.2f}s'.format(end - start))",
"def run_timer():\n \n start_time = time.time()\n print(start_time)\n stopper = input(\"Press enter to stop\")\n end_time = time.time()\n print(\"You have finished collecting the blocks!\")\n duration = int(end_time - start_time)\n if duration > 25:\n print(\"You were too slow collecting the blocks, better luck next time\")\n else: \n print(\"Good job speedy, you collected all the blocks before time ran out!\")",
"def loop_run(self):\n super(TimerLoop, self).loop_run()\n self.timer = self.cothread.Timer(self.timeout,\n self.callback,\n retrigger=True)",
"def timer(ctx, config):\n log.info('Starting timer...')\n start = time.time()\n try:\n yield\n finally:\n duration = time.time() - start\n log.info('Duration was %f seconds', duration)\n ctx.summary['duration'] = duration",
"def stopwatch(message):\n t0 = time.time()\n try:\n yield\n finally:\n t1 = time.time()\n print('Total elapsed time for %s: %f s' % (message, t1 - t0))",
"def stopwatch(message):\r\n t0 = time.time()\r\n try:\r\n yield\r\n finally:\r\n t1 = time.time()\r\n print('Total elapsed time for %s: %.3f' % (message, t1 - t0))",
"def run(self):\n\t\twhile True:\n\t\t\tself.clock.tick(self.settings.max_fps)\n\t\t\tself._check_events()\n\t\t\tself._update_screen()",
"def timer(description):\n t0 = time.time()\n yield\n print(f'[{description}] done in {time.time() - t0:.0f} s')",
"def change_stopwatch(timez):\r\n\r\n m = timez // 60\r\n s2 = timez % 60\r\n s1 = 0 if s2 < 10 else \"\"\r\n now = f\"{m}:{s1}{s2}\"\r\n stopwatch.configure(text=now)",
"def run( self ):\r\n \r\n # Execute the per-cycle work specifed by the user\r\n for f in self.updateFuncList:\r\n f() # Please make these lightweight and pertain to UI drawing!\r\n \r\n # Update window\r\n self.rootWin.update_idletasks() # idk , draw or something!\r\n \r\n # Wait remainder of period\r\n elapsed = time.time() * 1000 - self.last\r\n if elapsed < self.stepTime:\r\n sleepTime = int( self.stepTime - elapsed ) \r\n else:\r\n sleepTime = 0\r\n # 4.e. Mark beginning of next loop\r\n self.last = time.time() * 1000 \r\n self.rootWin.after( sleepTime , self.run )",
"def main():\r\n\r\n print\r\n print '** Demonstrating new Timer print statement:'\r\n\r\n with Timer('Test Timer') as tm:\r\n current_second = 0\r\n while tm.current_result() < 5:\r\n if current_second != int(tm.current_result()):\r\n print '{s} second(s) elapsed.'.format(s=int(tm.current_result()))\r\n current_second = int(tm.current_result())\r\n\r\n print\r\n print '** Changing Timer unit and printing last result:'\r\n tm.unit = 'ms'\r\n print tm.last_result()",
"def main_loop(self):\n dt = 0\n self.clock.tick(FPS)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(FPS) / 1000.0",
"def print_time_elapsed(self):\r\n stop_time = time.time()\r\n elapsed_time = stop_time - self.start_time\r\n print(f\"-- time elapsed: {elapsed_time:.5f} s\", flush=True)",
"def run(self):\r\n\r\n # t=0 is singular point\r\n\r\n print 'Time of laboratory clock Tw =', self.tick\r\n tt = self.tmp\r\n ll = self.lst\r\n car = self.interaction(self.carr)\r\n ll.item_run(tt, self.tick, car)\r\n tt = tt.next\r\n\r\n # run of local time\r\n\r\n while not tt is None:\r\n\r\n if tt.dedicated_node:\r\n self.tick = self.tick + 1\r\n print 'Time of laboratory clock Tw =', self.tick\r\n\r\n # self.move() # It is classical motion of particle (example).\r\n\r\n self.move_reset()\r\n car = self.interaction(self.carr)\r\n\r\n ll = self.lst\r\n while not ll is None:\r\n ll.item_run(tt, self.tick, car)\r\n ll = ll.right\r\n\r\n tt = tt.next",
"def stopwatch(message):\n t0 = time.time()\n try:\n yield\n finally:\n t1 = time.time()\n log.debug('Total elapsed time for %s: %.3f', message, t1 - t0)",
"def watch_loop(self):\n # Double threaded function that allows to stop the loop mid execution\n def repeatIt():\n # reset UI and flag before starting loop\n self.resetLabels()\n self.reset_scrollbar()\n # enable stop button\n self.btnStop.config(state=\"normal\")\n # disable button while loop is running\n self.btnStart.config(state=\"disabled\")\n self.txtLoop.config(state=\"disabled\", textvariable=self.loopCounterUI)\n self.labelLoop.config(text=\"Loop Count: \")\n\n while self.loopCount.get() > 0:\n # move scrollbar to bottom\n self.testCanvas.yview_moveto(0)\n # count the loop\n self.loopCounterUI.set(self.loopCounterUI.get() + 1)\n\n # Run the test cases\n self.runThis()\n\n # Below are just to reset the UI\n if not self.stopLoop:\n print(\"loop not stopped so proceed\")\n # let user know script is stopping\n x = Label(\n self.testFrame, text=f'End of Loop',\n background=self.bgChooser(),\n foreground=\"#630984\",\n font=self.boldFont)\n x.pack(fill=X)\n # flag gor BG and labels\n self.bgCounter += 1\n self.LabelLists.append(x)\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n else:\n print(\"loop has been stopped so not gonna print End of Loop\")\n\n # pause before restarting loop\n self.loopCount.set(self.loopCount.get()-1)\n time.sleep(1)\n\n # disable stop button\n self.btnStop.config(state=\"disabled\")\n # re-enable button after loop is done\n self.btnStart.config(state=\"normal\")\n self.txtLoop.config(state=\"normal\", textvariable=self.loopCount)\n self.labelLoop.config(text=\"Enter Loop Count: \")\n # self.testCanvas.yview_moveto(0)\n # Let user know the script is done\n if not self.stopLoop:\n # loop did not stopped\n x = Label(\n self.testFrame, text=f'Test is done!',\n background=self.bgChooser(),\n foreground=\"#057224\",\n font=self.boldFont)\n x.pack(fill=X)\n self.bgCounter += 1\n else:\n x = Label(\n self.testFrame, text=f'Test stopped!',\n background=self.bgChooser(),\n foreground=\"#057224\",\n font=self.boldFont)\n x.pack(fill=X)\n self.bgCounter += 1\n self.btnStart.config(state=\"normal\")\n self.txtLoop.config(state=\"normal\", textvariable=self.loopCount)\n self.labelLoop.config(text=\"Enter Loop count: \")\n self.loopCount.set(50000)\n self.LabelLists.append(x)\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n thread = threading.Thread(target=repeatIt)\n thread.start()",
"def loop(self):\n while self.running:\n self.clock.tick(self.fps)\n self.events()\n self.update()\n self.draw()\n self.game_over()",
"def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1",
"def run(self):\n while not self.terminate_event.is_set():\n while self.count > 0 and self.start_event.is_set() and self.interval > 0:\n if self.tick_log:\n if (self.count * self.sleep_chunk - int(self.count * self.sleep_chunk)) == 0.0:\n self.log.debug(\"{name} countdown: {tick} ({interval}s @ step {step:.2f}s)\"\n .format(name=self.name, tick=self.count,\n interval=self.interval, step=self.sleep_chunk))\n if self.reset_event.wait(self.sleep_chunk):\n self.reset_event.clear()\n self.count = self.interval / self.sleep_chunk\n self.count -= 1\n if self.count <= 0:\n self._callback()\n self.count = self.interval / self.sleep_chunk",
"def run_loop(self,queue):\r\n duration = []\r\n start = time.time()\r\n time.sleep(0.01)\r\n while True:\r\n duration.append(time.time()-start)\r\n print(\"Average Duration: {}ms\".format(1000*sum(duration)/len(duration)))\r\n start = time.time()\r\n if self._running:\r\n with self._run_lock:\r\n self.run(queue)\r\n time.sleep(0.001) # allow lock to be freed\r",
"def tick(self):\n if self.start > 0:\n self.time -= 1\n else:\n self.time += 1\n if self.time < 0:\n self.timer.stop()\n if self.signal_params is None:\n self.time_out.emit()\n else:\n self.time_out[object].emit(self.signal_params)\n else:\n self.showInterval()",
"def update_timer(self):\r\n frmt_time = \"%d:%02d\" % (self.time_minutes, self.time_seconds)\r\n self.time_seconds += 1\r\n if self.time_seconds == 60:\r\n self.time_seconds = 0\r\n self.time_minutes += 1\r\n\r\n self.mainWidget.statusLabel.setText(\"{} {} --- {} {} --- {}\".format(self.elapsedTimeString,\r\n frmt_time,\r\n self.freeSpaceString,\r\n get_free_space(self.config.videodir),\r\n self.recordingString))",
"def refresh(self):\n # If the timer is still running\n if self.is_running:\n # Clear time elapsed\n self.time_elapsed = 0\n # Get the delta (in seconds) between all of the stop and start times\n # and add them to time elapsed.\n for entry in xrange(0, len(self.stop_times)):\n delta = (self.stop_times[entry] - self.start_times[entry])\n self.time_elapsed += delta.seconds + delta.days*86400\n # Added these two lines to fix a weird bug that added an extra\n # second after resuming the timer.\n if len(self.stop_times) > 1:\n self.time_elapsed -= 1\n # Get the time delta between now and the last start time and\n # format it as an integer of seconds.\n delta = datetime.now() - self.start_times[-1]\n delta_in_seconds = delta.seconds + delta.days*86400\n # Add the time delta (in seconds) to time_elapsed.\n self.time_elapsed += delta_in_seconds\n self.label.set_text(self.format_timer(self.time_elapsed))\n # If the indicator exists, set it to the time elapsed as well\n if not self.indicator_label == None:\n formatted_time = self.format_timer(self.time_elapsed)\n self.indicator_label.set_label(formatted_time)\n # Return true if the timer is running to keep he callback going\n return self.is_running",
"def run():\n\n window = get_window()\n\n # Used in some unit test\n if os.environ.get('ARCADE_TEST'):\n window.on_update(window._update_rate)\n window.on_draw()\n elif window.headless:\n # We are entering headless more an will emulate an event loop\n import time\n\n # Ensure the initial delta time is not 0 to be\n # more in line with how a normal window works.\n delta_time = window._draw_rate\n last_time = time.perf_counter()\n\n # As long as we have a context --\n while window.context:\n # Select active view or window\n active = window.current_view or window\n\n active.on_update(delta_time)\n if window.context:\n active.on_draw()\n\n # windwow could be closed in on_draw\n if window.context:\n window.flip()\n\n now = time.perf_counter()\n delta_time, last_time = now - last_time, now\n else:\n import sys\n if sys.platform != 'win32':\n # For non windows platforms, just do pyglet run\n pyglet.app.run(window._draw_rate)\n else:\n # Ok, some Windows platforms have a timer resolution > 15 ms. That can\n # drop our FPS to 32 FPS or so. This reduces resolution so we can keep\n # FPS up.\n import contextlib\n import ctypes\n from ctypes import wintypes\n\n winmm = ctypes.WinDLL('winmm')\n\n class TIMECAPS(ctypes.Structure):\n _fields_ = (('wPeriodMin', wintypes.UINT),\n ('wPeriodMax', wintypes.UINT))\n\n def _check_time_err(err, func, args):\n if err:\n raise WindowsError('%s error %d' % (func.__name__, err))\n return args\n\n winmm.timeGetDevCaps.errcheck = _check_time_err\n winmm.timeBeginPeriod.errcheck = _check_time_err\n winmm.timeEndPeriod.errcheck = _check_time_err\n\n @contextlib.contextmanager\n def timer_resolution(msecs=0):\n caps = TIMECAPS()\n winmm.timeGetDevCaps(ctypes.byref(caps), ctypes.sizeof(caps))\n msecs = min(max(msecs, caps.wPeriodMin), caps.wPeriodMax)\n winmm.timeBeginPeriod(msecs)\n yield\n winmm.timeEndPeriod(msecs)\n\n with timer_resolution(msecs=10):\n pyglet.app.run(window._draw_rate)",
"def timer(work_log):\n start = time.time()\n print '\\nyou started working at %s\\n' % time.ctime(int(start))\n\n input = raw_input(\"\\ntype 'stop' to stop timer...\\n\")\n while (input != 'stop'):\n input = raw_input(\"\\ntype 'stop' to stop timer...\\n\")\n work = raw_input(\"\\nwhat'd you work on?\\n\")\n stop = time.time()\n print_to_file(start, stop, (stop-start), work, work_log)"
] | [
"0.67962104",
"0.6714543",
"0.6504831",
"0.64995676",
"0.6336717",
"0.6330287",
"0.6256835",
"0.62167007",
"0.6187998",
"0.6170958",
"0.6117941",
"0.607546",
"0.60544944",
"0.6039015",
"0.60389596",
"0.6019436",
"0.6006544",
"0.5976301",
"0.5935193",
"0.5933771",
"0.59141564",
"0.59115285",
"0.5893195",
"0.58659047",
"0.5797687",
"0.5778",
"0.5761752",
"0.57458854",
"0.5738301",
"0.57327694"
] | 0.70103276 | 0 |
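The stopwatch loop in the record above depends on a Qt-style `self.gui.time_label` and a `self.stop_event`, neither of which is part of the snippet. Below is a minimal, self-contained console sketch of the same pattern, assuming only the standard library (`threading.Event`, `time`) and printing the elapsed time instead of updating a label; the names `run_stopwatch` and `stop_event` are kept only to mirror the record, not taken from any particular project.

import threading
from time import sleep, time

def run_stopwatch(stop_event, start_time):
    # Poll roughly every 10 ms and report the elapsed time until asked to stop.
    while not stop_event.is_set():
        sleep(0.01)  # accurate to about 1/100th of a second
        print("{:.2f}".format(time() - start_time), end="\r")

if __name__ == "__main__":
    stop_event = threading.Event()
    worker = threading.Thread(target=run_stopwatch, args=(stop_event, time()))
    worker.start()
    sleep(1.0)        # let the stopwatch run for about a second
    stop_event.set()  # signal the loop to exit
    worker.join()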
return True if |val| is an instance of list, False otherwise | def _is_list(val):
return isinstance(val, list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_list(value):\n return isinstance(value, list)",
"def is_list(value):\n return isinstance(value, list) or None",
"def _is_list(item):\n return isinstance(item, list)",
"def is_list(obj):\n return type(obj) is list",
"def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )",
"def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False",
"def isList(obj):\n return type(obj)==types.ListType",
"def is_list(self) -> bool:\n return False",
"def isList(x):\n \n return ( type(x) == list ) # True if the type of x is a list",
"def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()",
"def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True",
"def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)",
"def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))",
"def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence",
"def is_list(self) -> bool:\n if self.is_list_of_list: # pylint: disable=R1705\n return False\n else:\n return bool(AnnotationWrapper.list_field_re.match(self.data))",
"def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False",
"def is_list(s_list):\n return isa(s_list, List)",
"def is_list(self):\n answer = self._call('is_list')\n return answer.yes",
"def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))",
"def is_list_of_list(self) -> bool:\n return bool(AnnotationWrapper.list_of_list_re.match(self.data))",
"def _list(self, val, fld):\n if isinstance(val, (list, tuple)):\n if len(val) == 1:\n return fld == val[0]\n else:\n return fld.in_(val)\n else:\n return fld == val",
"def is_list(annotation):\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list",
"def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)",
"def is_tuple_or_list(value):\n return isinstance(value, list) or isinstance(value, tuple)",
"def is_sequence_of_list(items):\n return all(isinstance(item, list) for item in items)",
"def validate_list(types,val,allowed,tname):\n if not len(types): return TYPE_MISMATCH\n if type(val) not in TYPES[tname]: raise Exception('unknown type')\n for v in val:\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True",
"def is_array(val):\n return (\n isinstance(val, tuple) or \\\n isinstance(val, dict) or \\\n isinstance(val, list)\n )",
"def list_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, list): return False\n all_of = [value or True for value in verifield if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))",
"def list_check(lst):\n for item in lst:\n if type(item) != list:\n return False\n return True",
"def _is_list(arg):\n if isinstance(arg, dict):\n return False\n if isinstance(arg, str): # Python 3-only, as str has __iter__\n return False\n return (\n not _has_method(arg, \"strip\")\n and _has_method(arg, \"__getitem__\")\n or _has_method(arg, \"__iter__\")\n )"
] | [
"0.81332123",
"0.7750742",
"0.767848",
"0.7669833",
"0.76221544",
"0.7603478",
"0.75646144",
"0.7458807",
"0.7454284",
"0.7441061",
"0.7392627",
"0.73333895",
"0.7226106",
"0.7216461",
"0.7105663",
"0.70489925",
"0.7037061",
"0.7035671",
"0.7025254",
"0.6976099",
"0.6933247",
"0.688682",
"0.6871853",
"0.67944044",
"0.6727481",
"0.67076856",
"0.6684601",
"0.6659844",
"0.66309226",
"0.66216445"
] | 0.89659095 | 0 |
return True if |val| is an instance of dict, False otherwise | def _is_dict(val):
return isinstance(val, dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isdict(val: Any) -> bool:\n return isinstance(val, MutableMapping)",
"def is_dict(value):\n return isinstance(value, dict)",
"def isdictinstance(obj):\n return isinstance(obj, dict) or isinstance(obj, DotDict)",
"def is_dict(obj):\n return type(obj) == type({})",
"def _is_dict(item):\n return isinstance(item, dict)",
"def is_dictionary(obj):\n return type(obj) is dict",
"def is_dict(self) -> bool:\n return True",
"def isDict(data):\n\ttry:\n\t\tfrom types import DictType\n\t\tif type(data) == DictType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type({}):\n\t\t\treturn True\n\treturn False",
"def is_typed_dict(self) -> bool:\n return True",
"def dict_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, dict): return False\n all_of = [value or True for value in verifield.values() if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))",
"def is_dictionary_subclass(obj):\n return (hasattr(obj, '__class__') and\n issubclass(obj.__class__, dict) and not is_dictionary(obj))",
"def has_value(value):\n return IsDictContainingValue(wrap_matcher(value))",
"def quacks_like_dict(object):\n return isinstance(object, Mapping)",
"def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay",
"def quacks_like_dict(object):\n return isinstance(object, collections.Mapping)",
"def rule_00_config_is_dict(session):\n return isinstance(session[\"config\"], dict)",
"def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)",
"def is_sequence_of_dict(items):\n return all(isinstance(item, dict) for item in items)",
"def check_for_dict(check):",
"def validate_dict(types,val,allowed,typ):\n if not len(types): return TYPE_MISMATCH\n if str(type(val)) not in typ['list']: raise(Exception('unknown type'))\n for k,v in val.items():\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True",
"def has_dict(obj_type, obj, tolerance=25):\n ancestor_types = deque()\n while obj_type is not type and tolerance:\n ancestor_types.appendleft(obj_type)\n obj_type = type(obj_type)\n tolerance -= 1\n for ancestor in ancestor_types:\n __dict__ = getattr(ancestor, '__dict__', None)\n if __dict__ is not None:\n if '__dict__' in __dict__:\n return True\n return hasattr(obj, '__dict__')",
"def can_insert(data):\n return isinstance(data, dict)",
"def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True",
"def is_valid(val, val_type=\"key\"):\n if val_type == \"key\":\n if not isinstance(val, str):\n raise ValueError(f\"Key [{val}] must be of type str.\")\n return len(val) <= config.MAX_KEY_LEN\n elif val_type == \"value\":\n if isinstance(val, dict):\n return sys.getsizeof(val) <= config.MAX_VALUE_SIZE\n raise ValueError(f\"Value [{val}] must be of type dict.\")",
"def _is_json_object(blob):\n try:\n return isinstance(json.loads(blob), dict)\n except ValueError:\n return False",
"def _validate_dict_entry(self, dict_entry):\r\n try:\r\n # Type-check all of the type-critical items.\r\n if (\r\n type(dict_entry[\"id\"]) == int and\r\n type(dict_entry[\"date\"]) == datetime.date and\r\n type(dict_entry[\"time\"]) == datetime.time and\r\n type(dict_entry[\"datetime\"]) == datetime.datetime and\r\n type(dict_entry[\"duration\"]) == datetime.timedelta):\r\n return True\r\n else:\r\n return False\r\n # end if\r\n except Exception as err:\r\n _z_exc(\"logentry.py/_validate_dict_entry\", err)\r\n # end try\r",
"def dict_support_required(self):\n\t\treturn self.typemanager.has_dicts",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def test_obj_dict(self):\n obj = storage.all()\n self.assertIsInstance(obj, dict)",
"def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)"
] | [
"0.81098014",
"0.7984968",
"0.78048414",
"0.7678112",
"0.7643852",
"0.7478924",
"0.7321905",
"0.7204761",
"0.71674895",
"0.6830731",
"0.68238515",
"0.6810352",
"0.67759424",
"0.66980416",
"0.6552115",
"0.65379673",
"0.6410042",
"0.63863635",
"0.6381625",
"0.6375066",
"0.631575",
"0.6273137",
"0.624079",
"0.61973953",
"0.6189864",
"0.60490674",
"0.603635",
"0.6004135",
"0.5940965",
"0.59248185"
] | 0.8864714 | 0 |
return True if |wildcard| string matches |s| string. A valid wildcard is '*', optionally followed by ':type' where type is one of str, int, float, or bool. | def _is_wildcard_match(s, wildcard):
wildcard = wildcard.strip()
glob_pat = re.compile(r'\*(:(?P<type>\w+))?$')
m = glob_pat.match(wildcard)
if m:
if m.group('type'):
type_to_meth = globals()['__builtins__']
type_to_meth = {k:v for k,v in type_to_meth.items()
if k in ['str','int','float','bool']}
try:
return isinstance(s, type_to_meth[m.group('type')])
except KeyError:
raise InvalidWildcardError("{} is an invalid type in {}".format(
m.group('type'), wildcard))
return True
raise InvalidWildcardError(wildcard) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match",
"def test_match_any_wildcard_in_literal(self):\n qs = '\"Foo t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Foo t\\*\"', \"Wildcard should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Foo t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def test_match_any_wildcard_is_present(self):\n qs = \"Foo t*\"\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertEqual(qs, qs_escaped, \"The querystring should be unchanged\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=qs)),\n \"Wildcard Q object should be generated\",\n )",
"def wildcard_match(item, base, wildcard):\n if wildcard.startswith(\"**/\"):\n wildcard = wildcard[3:]\n for base_element in base.split(\"/\"):\n if fnmatch.fnmatch(base_element, wildcard):\n return True\n return False\n else:\n return fnmatch.fnmatch(item, wildcard)",
"def match(pattern, string):\n if not len(pattern) and not len(string):\n return True\n\n if len(pattern) > 1 and pattern[0] == '*' and len(string) == 0:\n return False\n\n if (len(pattern) > 0 and pattern[0] == '?') or \\\n (len(pattern) != 0 and len(string) != 0 and pattern[0] == string[0]):\n return match(pattern[1:], string[1:])\n\n if len(pattern) != 0 and pattern[0] == '*':\n return match(pattern[1:], string) or match(pattern, string[1:])\n\n return False",
"def test_multiple_match_any_wildcard_in_literal(self):\n qs = '\"Fo*o t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\*o t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\*o t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def isMatch(self, s: str, p: str) -> bool:\n def is_match(self, text, pattern):\n if not pattern:\n return not text\n\n first_match = bool(text) and pattern[0] in {text[0], '.'}\n\n if len(pattern) >= 2 and pattern[1] == '*':\n return (self.isMatch(text, pattern[2:]) or\n first_match and self.isMatch(text[1:], pattern))\n else:\n return first_match and self.isMatch(text[1:], pattern[1:])\n\n def isMatch(self, text, pattern):\n memo = {}\n\n def dp(i, j):\n if (i, j) not in memo:\n if j == len(pattern):\n ans = i == len(text)\n else:\n first_match = i < len(text) and pattern[j] in {text[i], '.'}\n if j + 1 < len(pattern) and pattern[j + 1] == '*':\n ans = dp(i, j + 2) or first_match and dp(i + 1, j)\n else:\n ans = first_match and dp(i + 1, j + 1)\n\n memo[i, j] = ans\n return memo[i, j]\n\n return dp(0, 0)",
"def stringcheck(self, rule, string):\n if not \"*\" in rule:\n return rule in string\n elif rule[0] == \"*\":\n return string.endswith(rule[1:])\n elif rule[-1] == \"*\":\n return string.startswith(rule[:-1])\n else:\n start, end = rule.split(\"*\")\n return string.startswith(start) and string.endswith(end)",
"def test_wildcards_inside_outside_multiple_literals(self):\n qs = '\"Fo?\" s* \"yes*\" o?'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\?\" s* \"yes\\*\" o?',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\?\" s* \"yes\\*\" o?')),\n \"Wildcard Q object should be generated\",\n )",
"def test_handle_wildcard(self):\n sequence1 = 'ATCG'\n sequence2 = 'ATNG'\n sequence3 = 'NNCN'\n self.assertEqual(handle_wildcard(sequence1), ['ATCG'])\n self.assertEqual(handle_wildcard(sequence2), [\"%AT_G%\"])\n self.assertEqual(handle_wildcard(sequence3), [\"%__C_%\"])",
"def test_wildcards_both_inside_and_outside_literal(self):\n qs = '\"Fo? t*\" said the *'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\? t\\*\" said the *',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\? t\\*\" said the *')),\n \"Wildcard Q object should be generated\",\n )",
"def match(pattern, target):\n pattern = ''.join('.*' if c == '*' else re.escape(c) for c in pattern)\n return bool(re.match('^' + pattern + '$', target))",
"def glob_match(value, pat, doublestar=False, ignorecase=False, path_normalize=False):\n if ignorecase:\n value = value.lower()\n pat = pat.lower()\n if path_normalize:\n value = value.replace('\\\\', '/')\n pat = pat.replace('\\\\', '/')\n return _translate(pat, doublestar=doublestar).match(value) is not None",
"def test_mixed_wildcards_in_literal(self):\n qs = '\"Fo? t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\? t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\? t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def check(self, s, field='word', cats=None):\n f = self[field]\n if cats is None:\n # treat s as plain regex\n return regex.search(s, f) is not None\n # s is a sound change rule\n try:\n # parse s\n s = sound_changer.parse_rule(s, cats)\n except AttributeError:\n # s is a dict (i.e. already parsed)\n pass\n return bool(sound_changer.find_matches(f, s, cats)[0])",
"def match(self, s):\n self.matches = self.re.search(s)\n return self.matches",
"def wildcard(s, star_min=1):\n\n def _feed_parts(input_parts):\n for part in input_parts:\n if part == \"*\":\n if star_min == 0:\n yield \".*\"\n elif star_min == 1:\n yield \".+\"\n else:\n yield f\".{{{star_min},}}\"\n elif part == \"?\":\n yield \".\"\n else:\n yield re.escape(part)\n\n return \"\".join(_feed_parts(re.split(r'([\\?\\*])', s)))",
"def match(self, s):\n if self.re.match(s):\n self.list.append(s)\n return True\n else: return False",
"def isValid(self, s):\n for valid in self.validTargets:\n if (valid[0] in s):\n return True\n return False",
"def test_searchWildcard(self):\n self.assertFalse(\n self.server.search_UID([b'2:3'], self.seq, self.msg, (1, 1234)))\n # 2:* should get translated to 2:<max UID> and then to 1:2\n self.assertTrue(\n self.server.search_UID([b'2:*'], self.seq, self.msg, (1, 1234)))\n self.assertTrue(\n self.server.search_UID([b'*'], self.seq, self.msg, (1, 1234)))",
"def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass",
"def has_asterisk(self, string):\n if self.debug:\n print('has asterisk'+lineno())\n\n if '*' in string:\n if self.debug:\n print('has an asterisk '+lineno())\n\n return True\n\n return False",
"def isMatch(s: str, p: str):\n # '.*' matches any string.\n if p == '.*':\n return True\n # Finished both string and pattern!\n if not s and not p:\n return True\n # Repeat character zero times\n if len(p) > 1:\n if not s and p[1] == '*':\n return isMatch(s, p[2:])\n # Finished one of string/pattern but not both.\n if not s or not p:\n return False\n # Pattern of length one \n if len(p) == 1:\n if p[0] == s[0] or p[0] == '.':\n return isMatch(s[1:], p[1:])\n else:\n return False\n # Check if we have '*' character\n if p[1] == '*':\n # Zero of preceding character\n if p[0] != '.' and p[0] != s[0]:\n return isMatch(s, p[2:])\n # Characters (not '.') match!\n if p[0] == s[0]:\n if isMatch(s, p[2:]):\n return True\n while p[0] == s[0]:\n s = s[1:]\n if isMatch(s, p[2:]):\n return True\n if not s:\n return False\n return False\n # '.' characte matches any alphabetic character\n if p[0] == '.':\n if isMatch(s, p[2:]):\n return True\n while s and p:\n s = s[1:]\n if isMatch(s, p[2:]):\n return True\n return False\n # If first character matches (or is '.'), recursively\n # check smaller pattern/string\n if p[0] == s[0] or p[0] == '.':\n return isMatch(s[1:], p[1:])\n return False",
"def issubstring(substring, string):\n return substring in string",
"def is_wildcard(obj):\n return isinstance(obj, Symbol) and obj == Symbol('*')",
"def test_name_matching(string, matches: bool):\n assert (re.fullmatch(pattern, string) is not None) == matches",
"def test_wildcard_at_opening_of_string(self):\n with self.assertRaises(index.QueryError):\n wildcard_escape(\"*nope\")\n\n with self.assertRaises(index.QueryError):\n Q_(\"match\", \"title\", \"*nope\")",
"def match(pattern, s):\n # The regexp compilation caching is inlined in both Match and Search for\n # performance reasons; factoring it out into a separate function turns out\n # to be noticeably expensive.\n if pattern not in _regexp_compile_cache:\n _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n return _regexp_compile_cache[pattern].match(s)",
"def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass",
"def match_regex_1(s, r):\n # Case: string is empty.\n if not s:\n if not r:\n return True\n if r[0] == '*':\n return match_regex_1(s, r[1:])\n return False\n # Case: string is not empty.\n if not r:\n return False\n regex_instruction = r[0]\n if regex_instruction in ('.', s[0]):\n return match_regex_1(s[1:], r[1:])\n if regex_instruction == '*':\n return match_regex_1(s[1:], r[1:]) or match_regex_1(s[1:], r)\n return False"
] | [
"0.715021",
"0.67261773",
"0.6713398",
"0.6707344",
"0.6571823",
"0.6482795",
"0.6318864",
"0.6266622",
"0.623342",
"0.6226286",
"0.6225357",
"0.61540484",
"0.61485624",
"0.6146087",
"0.61196405",
"0.59138566",
"0.59130514",
"0.5899484",
"0.58328605",
"0.5817263",
"0.5811018",
"0.57858694",
"0.5785483",
"0.575956",
"0.57477194",
"0.57455075",
"0.5693693",
"0.5646263",
"0.5624196",
"0.5621989"
] | 0.76775545 | 0 |
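The record above shows `_is_wildcard_match` in isolation: its `InvalidWildcardError` and the surrounding Settings class are not included. Here is a hedged, standalone sketch of the same typed-wildcard idea — a bare '*' accepts any value, while '*:int', '*:float', '*:str' and '*:bool' restrict the match to that built-in type. The error class is a placeholder assumed for the sketch, not the original module's definition.

import re

class InvalidWildcardError(ValueError):
    """Placeholder for the error type used by the original module."""

_GLOB_PAT = re.compile(r'\*(:(?P<type>\w+))?$')
_TYPES = {'str': str, 'int': int, 'float': float, 'bool': bool}

def is_wildcard_match(value, wildcard):
    # '*' matches anything; '*:<type>' additionally checks the value's type.
    m = _GLOB_PAT.match(wildcard.strip())
    if not m:
        raise InvalidWildcardError(wildcard)
    type_name = m.group('type')
    if type_name is None:
        return True
    try:
        return isinstance(value, _TYPES[type_name])
    except KeyError:
        raise InvalidWildcardError(
            "{} is an invalid type in {}".format(type_name, wildcard))

# Example calls (all standalone):
# is_wildcard_match("abc", "*")    -> True
# is_wildcard_match(3, "*:int")    -> True
# is_wildcard_match(3, "*:str")    -> False
# is_wildcard_match(3, "*:list")   -> raises InvalidWildcardError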
return True if regex pattern string |pat| matches string |s|. A valid pattern may carry optional regex flag characters (A, I, L, M, S, X) as recognised by Settings._REPAT. | def _is_regex_match(s, pat):
pat = pat.rstrip()
m = re.search(Settings._REPAT, pat)
if m:
flags_combined = 0
if m.group('flag'):
char_to_flag = {
'A':re.A, 'I':re.I, 'L':re.L, 'M':re.M, 'S':re.S, 'X':re.X}
for flag in list(m.group('flag')):
flags_combined |= char_to_flag[flag]
return bool(re.search(m.group('pat'), s, flags_combined))
raise InvalidRegexError(pat) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isMatch(self, s: str, p: str) -> bool:\n def is_match(self, text, pattern):\n if not pattern:\n return not text\n\n first_match = bool(text) and pattern[0] in {text[0], '.'}\n\n if len(pattern) >= 2 and pattern[1] == '*':\n return (self.isMatch(text, pattern[2:]) or\n first_match and self.isMatch(text[1:], pattern))\n else:\n return first_match and self.isMatch(text[1:], pattern[1:])\n\n def isMatch(self, text, pattern):\n memo = {}\n\n def dp(i, j):\n if (i, j) not in memo:\n if j == len(pattern):\n ans = i == len(text)\n else:\n first_match = i < len(text) and pattern[j] in {text[i], '.'}\n if j + 1 < len(pattern) and pattern[j + 1] == '*':\n ans = dp(i, j + 2) or first_match and dp(i + 1, j)\n else:\n ans = first_match and dp(i + 1, j + 1)\n\n memo[i, j] = ans\n return memo[i, j]\n\n return dp(0, 0)",
"def regex_match(text, pattern):\n try:\n pattern = re.compile(\n pattern,\n flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,\n )\n except BaseException:\n return False\n return pattern.search(text) is not None",
"def _is_valid_regex(regex_pattern: str, text: str) -> bool:\n match = re.match(regex_pattern, text)\n return match is not None",
"def match(pattern, s):\n # The regexp compilation caching is inlined in both Match and Search for\n # performance reasons; factoring it out into a separate function turns out\n # to be noticeably expensive.\n if pattern not in _regexp_compile_cache:\n _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n return _regexp_compile_cache[pattern].match(s)",
"def validate_regex(self, pattern, flags=0):\r\n try:\r\n re.compile(pattern, flags)\r\n return False\r\n except:\r\n errormsg(_(\"Invalid Regular Expression!\"))\r\n error(traceback.format_exc())\r\n return True",
"def match(self, regexp):\n try:\n self.rematch = regexp.match(self.matchstring)\n except AttributeError:\n self.rematch = re.match(regexp, self.matchstring)\n return bool(self.rematch)",
"def isMatch(s: str, p: str):\n # '.*' matches any string.\n if p == '.*':\n return True\n # Finished both string and pattern!\n if not s and not p:\n return True\n # Repeat character zero times\n if len(p) > 1:\n if not s and p[1] == '*':\n return isMatch(s, p[2:])\n # Finished one of string/pattern but not both.\n if not s or not p:\n return False\n # Pattern of length one \n if len(p) == 1:\n if p[0] == s[0] or p[0] == '.':\n return isMatch(s[1:], p[1:])\n else:\n return False\n # Check if we have '*' character\n if p[1] == '*':\n # Zero of preceding character\n if p[0] != '.' and p[0] != s[0]:\n return isMatch(s, p[2:])\n # Characters (not '.') match!\n if p[0] == s[0]:\n if isMatch(s, p[2:]):\n return True\n while p[0] == s[0]:\n s = s[1:]\n if isMatch(s, p[2:]):\n return True\n if not s:\n return False\n return False\n # '.' characte matches any alphabetic character\n if p[0] == '.':\n if isMatch(s, p[2:]):\n return True\n while s and p:\n s = s[1:]\n if isMatch(s, p[2:]):\n return True\n return False\n # If first character matches (or is '.'), recursively\n # check smaller pattern/string\n if p[0] == s[0] or p[0] == '.':\n return isMatch(s[1:], p[1:])\n return False",
"def match(self, s):\n self.matches = self.re.search(s)\n return self.matches",
"def REGEXMATCH(text, regular_expression):\n return bool(re.search(regular_expression, text))",
"def match(pattern, string):\n if not len(pattern) and not len(string):\n return True\n\n if len(pattern) > 1 and pattern[0] == '*' and len(string) == 0:\n return False\n\n if (len(pattern) > 0 and pattern[0] == '?') or \\\n (len(pattern) != 0 and len(string) != 0 and pattern[0] == string[0]):\n return match(pattern[1:], string[1:])\n\n if len(pattern) != 0 and pattern[0] == '*':\n return match(pattern[1:], string) or match(pattern, string[1:])\n\n return False",
"def search(self, regexp):\n try:\n self.rematch = regexp.search(self.matchstring)\n except AttributeError:\n self.rematch = re.search(regexp, self.matchstring)\n return bool(self.rematch)",
"def _verify_format(s, format):\n r = re.compile(format)\n if r.match(s) is not None:\n return True\n return False",
"def test_name_matching(string, matches: bool):\n assert (re.fullmatch(pattern, string) is not None) == matches",
"def check(self, s, field='word', cats=None):\n f = self[field]\n if cats is None:\n # treat s as plain regex\n return regex.search(s, f) is not None\n # s is a sound change rule\n try:\n # parse s\n s = sound_changer.parse_rule(s, cats)\n except AttributeError:\n # s is a dict (i.e. already parsed)\n pass\n return bool(sound_changer.find_matches(f, s, cats)[0])",
"def validaURL(url: AnyStr) -> bool:\n\n return re.compile(patternURL).search(url) != None # Linea 1",
"def match_regex_1(s, r):\n # Case: string is empty.\n if not s:\n if not r:\n return True\n if r[0] == '*':\n return match_regex_1(s, r[1:])\n return False\n # Case: string is not empty.\n if not r:\n return False\n regex_instruction = r[0]\n if regex_instruction in ('.', s[0]):\n return match_regex_1(s[1:], r[1:])\n if regex_instruction == '*':\n return match_regex_1(s[1:], r[1:]) or match_regex_1(s[1:], r)\n return False",
"def regex(value, pattern):\r\n c_pattern = re.compile(r\"\\b\" + pattern.lower() + r\"\\b\")\r\n return c_pattern.search(value) is not None",
"def contains(text: str, pattern: str) -> bool:\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # COMPLEXITY: O(n) b/c we are using find_index method which is O(n)\n return find_index(text, pattern) is not None",
"def validate_string_match(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.match(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out",
"def is_regex_in_string(regex, regex_string):\n try:\n match = re.search(regex, regex_string)\n does_nothing(match.group())\n return True;\n except Exception, e:\n return False;",
"def test_pattern(pattern, fields):\n if not pattern: # \"empty\" pattern\n return True\n\n def eval_exp(text):\n m = re.match(r'^(\\$(\\d+))?(!)?/([^/]*)/$', text)\n try:\n if m: # regular expression\n _, num, neg, pat = m.groups()\n num = int(num) if num else 0 # if no `$i` specified, default to `$0`\n m = re.search(pat, fields[num])\n logging.info(u\"regex: '%s' %s~ /%s/\" % (fields[num], neg or u'', pat))\n return bool(m) != bool(neg)\n else: # expression\n exp = translate_fields(text, fields, u'_') # replace non-exist `$i` with u'_'\n logging.info(u'exp: %s' % exp)\n return bool(exp and eval(exp))\n except Exception, e:\n logging.debug(unicode(e))\n return False\n\n if u',' not in pattern: # \"regular expression\" or \"expression\" pattern\n return eval_exp(pattern)\n else: # \"begpat, endpat\" pattern\n global SWITCH_ON\n\n value = False\n\n begpat, endpat = [s.strip() for s in pattern.split(u',')]\n if eval_exp(begpat):\n SWITCH_ON = True\n if SWITCH_ON:\n value = True\n if eval_exp(endpat):\n SWITCH_ON = False\n\n return value",
"def check_pattern(pattern, token):\n split_token = re.split('\\W+', token, 1)\n if split_token[0] == '':\n split_token = split_token[1]\n else:\n split_token = split_token[0]\n return split_token == pattern",
"def regMatch(value, regex):\n if regex == \"*\": # Accounts for python wildcard bug\n regex = \"(.*)\"\n pattern = re.compile(regex)\n match_obj = pattern.search(value)\n return bool(match_obj)",
"def match(cls, text):\r\n return cls.main.pattern.match(text)",
"def validate_string_search(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.search(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out",
"def has_pattern(self, pattern):\n\n pat_len = len(pattern)\n if pat_len > self.text_len:\n raise ValueError(\"Pattern length is bigger than text\")\n\n if self.first_occurence(pattern) == -1:\n return False\n\n return True",
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match",
"def _pattern_is_simple(pattern):\n return bool(re.match('[\\\\w_]+$', tostring(pattern)))",
"def match(self, s):\n if self.re.match(s):\n self.list.append(s)\n return True\n else: return False",
"def _source_matchpattern_field_string_is_valid_as_regex(self):\n if self.source_matchpattern is None:\n raise RuleError(\"'source_matchpattern' must be a valid regex.\")\n if not regex_is_valid(self.source_matchpattern):\n # print(f\"{self}\")\n raise SourceMatchpatternError(\n \"Value for 'source_matchpattern' must be a valid regex.\"\n )\n return True"
] | [
"0.70966935",
"0.70157164",
"0.6960576",
"0.6815772",
"0.6807737",
"0.67286736",
"0.6649333",
"0.6632213",
"0.6626841",
"0.65312064",
"0.652153",
"0.65018463",
"0.6370927",
"0.63706475",
"0.63171095",
"0.6270721",
"0.6270025",
"0.6266336",
"0.6263732",
"0.6250111",
"0.62382215",
"0.62365776",
"0.6216767",
"0.61515135",
"0.61398745",
"0.61302066",
"0.6123086",
"0.6102496",
"0.6089095",
"0.60889727"
] | 0.8187956 | 0 |
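The record above relies on a class constant `Settings._REPAT` that is not shown, so the exact pattern syntax cannot be reproduced here. As a rough, standalone illustration of the same idea — a regex string carrying optional single-letter flag suffixes that are OR-ed into `re` flags — the sketch below assumes a made-up 'pattern/FLAGS' syntax (e.g. 'foo.*bar/IS'); only the flag-letter mapping (A, I, L, M, S, X) is taken from the record.

import re

_CHAR_TO_FLAG = {'A': re.A, 'I': re.I, 'L': re.L, 'M': re.M, 'S': re.S, 'X': re.X}

def regex_match(s, spec):
    # Split the assumed 'pattern/FLAGS' spec and OR together the requested flags.
    pattern, sep, flag_chars = spec.rstrip().rpartition('/')
    if not sep or not all(ch in _CHAR_TO_FLAG for ch in flag_chars):
        pattern, flag_chars = spec.rstrip(), ''  # no recognisable flag suffix
    flags = 0
    for ch in flag_chars:
        flags |= _CHAR_TO_FLAG[ch]
    return bool(re.search(pattern, s, flags))

# regex_match("Hello World", "hello.*world/I")  -> True (case-insensitive)
# regex_match("Hello World", "hello.*world")    -> False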
return True if |v| is in |valid_v|. |v| should be a primitive of either int, float, str, or bool. |valid_v| should be a list of any possible legal primitive, wildcard, or regex values. |valid_v| can also be a single primitive value, which will implicitly be converted to a list containing one element. Return False otherwise. | def _is_in_prim(v, valid_v):
if not isinstance(valid_v, list):
valid_v = [valid_v]
for pat in valid_v:
if isinstance(pat, str):
if '*' in pat:
if Settings._is_wildcard_match(v, pat):
return True
elif re.search(Settings._REPAT, pat):
if Settings._is_regex_match(str(v), pat):
return True
if v == pat:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has(self, v):\n return v in self.values",
"def _primitive_validity_check(v, valid_v):\n\n if not Settings._is_in_prim(v, valid_v):\n raise InvalidSettingError()",
"def is_valid_value(self, value):\n return value in self.values",
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):\n return False\n elif Settings._is_dict(elem):\n valid_dicts = [d for d in valid_l if isinstance(d, dict)]\n if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):\n return False\n else:\n raise InvalidSettingError()\n return True",
"def contains_vect(self, v: Tuple[float, float]) -> bool:\n assert len(v) == 2\n return bool(lib.cpBBContainsVect(self, v))",
"def __contains__(self, v):\n for i in self:\n if v in i:\n return True\n False",
"def eval_value(self, v):\n okay = False\n if ast_class(v) == 'Dict':\n # dict\n if self.eval_dict(v):\n okay = True\n elif ast_class(v) == 'List':\n # list\n if self.eval_list(v):\n okay = True\n elif ast_class(v) == 'Str':\n # string\n okay = True\n elif ast_class(v) == 'Name' and v.id in ('True', 'False', 'None'):\n # booleans or None\n okay = True\n elif ast_class(v) == 'Num':\n # numbers\n okay = True\n elif ast_class(v) == 'UnaryOp' and ast_class(v.op) == 'USub' and ast_class(v.operand) == 'Num':\n # negative numbers\n okay = True\n return okay",
"def __check(self, v):\n v = base_type(v)\n if not self._restriction_test(v):\n raise ValueError, \"did not match restricted type\"\n return True",
"def check_list(source, value):\n try:\n return value in json.loads(source)\n except:\n return False",
"def isIntersection(self, v):\n return (any(inter.v == v for inter in self.inter1) or\n any(inter.v == v for inter in self.inter2))",
"def _is_primitive(val):\n\n prims = [int, float, str, bool]\n for prim in prims:\n if isinstance(val, prim):\n return True\n return False",
"def is_primitive(v):\n return isinstance(v, (int, float, bool, str))",
"def is_in(self, e):\n return e in self.vals",
"def contains(self, value):\n return value in self.values",
"def __contains__(self,v):\n for i in self._items:\n if near(i,v):\n return True\n return False",
"def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def could_be_boolean(val):\n if val == None:\n return False\n\n if isinstance(val, bool):\n return True\n\n if isinstance(val, (str, unicode)):\n if val.lower() in ['true', '1', 'false', '0']:\n return True\n\n if isinstance(val, (int, long)):\n if val in [0,1]:\n return True\n\n return False",
"def a_list(test_val: object, test_col: object, valid_values: object) -> object:\n tv_upper = test_val.upper()\n rc: bool = True\n # noinspection PyTypeChecker\n value_list = [x[test_col] for x in valid_values]\n value_list_upper = [x.upper() for x in value_list]\n if tv_upper not in value_list_upper:\n print(f'{test_val} is invalid. Valid values are {str(value_list)}')\n rc = False\n return rc",
"def containsValue(self, value):\n for val in values():\n if val == value or val == value:\n return True\n return False",
"def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_valid = self.valid_values\n if value not in current_valid:\n raise self.not_in_value_error(self.not_in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_valid)))",
"def has_value(cls, value):\n return value in [item.value for item in cls]",
"def check_permutation(u, v):\n for permutation in itertools.permutations(u):\n if v == permutation:\n return True\n return False",
"def in_list(value, arg):\r\n return value in arg",
"def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def in_list(verifield, checklist):\n return verifield in checklist",
"def contains(s, v):\n if empty(s):\n return False\n elif s.first == v:\n return True\n else:\n return contains(s.rest, v)",
"def has_value(cls, value):\n return any(value == item.value for item in cls)",
"def has_value(cls, value):\n return any(value == item.value for item in cls)",
"def __contains__(self, item: Union[T, Rangelike]) -> bool:\n if self == item:\n return True\n with suppress(TypeError):\n if _is_iterable_non_string(item):\n with suppress(ValueError):\n return all(\n any(subitem in rng for rng in self._ranges)\n for subitem in RangeSet._to_rangeset(item)\n )\n return any(item in rng for rng in self._ranges)",
"def type_valid(self):\n return contain_in_list_equal(self._type_or_ref, PARAM_RES_TYPES)"
] | [
"0.65102774",
"0.65090954",
"0.62292314",
"0.62203526",
"0.62055993",
"0.6187271",
"0.60626274",
"0.5736672",
"0.5600317",
"0.55947465",
"0.55650556",
"0.55649483",
"0.5529547",
"0.55027205",
"0.54704666",
"0.54414445",
"0.5438185",
"0.54267836",
"0.54200864",
"0.5377313",
"0.5375138",
"0.5347906",
"0.5347856",
"0.5343333",
"0.53233755",
"0.53233033",
"0.53151786",
"0.53151786",
"0.5297118",
"0.5280881"
] | 0.7078393 | 0 |
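Assuming the Settings class these helpers belong to is importable (it is not part of this record), calls to `_is_in_prim` might look like the lines below; the wildcard patterns follow the '*:type' form from the earlier record, and the expected results are inferred from the code as shown, not from running the original project.

# hypothetical usage, assuming `Settings` is imported from the original module
Settings._is_in_prim(5, [1, 2, 3])        # expected False: 5 equals none of the literals
Settings._is_in_prim(5, [1, 2, '*:int'])  # expected True: '*:int' accepts any int
Settings._is_in_prim(3.14, '*:float')     # expected True: a single value is wrapped in a list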
return True if every element in list |sublist| is in one of the lists contained in |lists|, False otherwise. Legal elements in |sublist| or the lists in |lists| are any primitive (int, float, str, bool), list, or dict. If an illegal element exists in |sublist|, an InvalidSettingError is raised | def _is_sublist_in_one_of_lists(sublist, lists):
type_to_one_of = Settings._get_type_to_one_of()
for vl in lists:
next_vl = False
for e in sublist:
if Settings._is_primitive(e):
t = 'primitive'
elif Settings._is_list(e):
vl = [l for l in vl if isinstance(l, list)]
t = 'list'
elif Settings._is_dict(e):
vl = [d for d in vl if isinstance(d, dict)]
t = 'dict'
else:
raise InvalidSettingError()
if not type_to_one_of[t](e, vl):
next_vl = True
break
if next_vl:
continue
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sublist_in(lst, sublst):\n for i in sublst:\n if i not in lst:\n return False\n return True",
"def contains(base, sub_list):\n\n return set(base) & set(sub_list) == set(sub_list)",
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):\n return False\n elif Settings._is_dict(elem):\n valid_dicts = [d for d in valid_l if isinstance(d, dict)]\n if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):\n return False\n else:\n raise InvalidSettingError()\n return True",
"def contains_sublist(lst, sublst):\n n = len(sublst)\n return any((sublst == lst[i:i + n]) for i in xrange(len(lst) - n + 1))",
"def is_superlist(self, list_of_setlists, compared_setlist):\n matches = list(filter(\n lambda x: x,\n [\n set(l[i]).issuperset(set(compared_setlist[i]))\n if i < len(compared_setlist) else False\n for l in list_of_setlists\n for i in range(len(l))\n ]\n ))\n return any(matches)",
"def allIn(listA: Union[int, List[int]], listB: Union[int, List[int]]) -> bool:\n if isinstance(listA, int):\n listA = [listA]\n if isinstance(listB, int):\n return listB in listA\n else:\n for item in listB:\n if item not in listA:\n return False\n return True",
"def allin(list1, list2):\n for rule1 in list1:\n literals1 = [literal for literal in rule1]\n for rule2 in list2:\n literals2 = [literal for literal in rule2]\n if literals1 != literals2:\n # If there is one rule different, then is not a sublist\n return False\n return True",
"def oneof(item_list, items):\n for i in item_list:\n if type(i) == type(list()) or type(i) == type(dict()):\n if sublist_in(item_list, i):\n return True\n else:\n if i in items: return True\n\n return False",
"def is_lili_subset(sub_lili, full_lili):\n if len(sub_lili) != len(full_lili):\n warnings.warn(\"Inputs should have same length\")\n for i, li in enumerate(sub_lili):\n if len(li) > 0 and not set(li).issubset(set(full_lili[i])):\n return False\n return True",
"def all_in_set(the_set, the_list):\n return True",
"def assert_all_lists_mutally_exclusive(list_of_lists):\n for idx, list1 in enumerate((list_of_lists)):\n for list2 in list_of_lists[idx + 1:]:\n if any(elem in list2 for elem in list1):\n raise ValueError(\n 'found matching items between two lists: \\n {}\\n {}'.format(\n ', '.join(list1),\n ', '.join(list2),\n ))",
"def list_should_contain_sub_list(self,list1,list2,msg=None,values=True):\r\n diffs = ', '.join(unic(item) for item in list2 if item not in list1)\r\n default = 'Folling values were not found form first list:'+ diffs\r\n _verify_condition(diffs == '',default,msg,values)",
"def all_lists(lst):\n\n for item in lst:\n if not isinstance(item, lst):\n return False\n\n return True\n\n # Alternate possibilities: use all() with a generator comprehension,\n # though that isn't something we've covered yet:\n #\n # return all(isinstance(item, list) for item in lst)",
"def has_sublist(l, sublist):\n sublist_length = len(sublist)\n l_length = len(l)\n \"*** YOUR CODE HERE ***\"\n # if sublist_length == 0:\n # return True\n # if sublist[0] not in l:\n # return False\n # else:\n # return has_sublist(l, sublist[1:])\n if sublist_length > l_length:\n return False\n elif l[0: sublist_length] == sublist:\n return True\n else:\n return has_sublist(l[1:], sublist)",
"def __isSpwContained(self, spwlist, subms_spws):\n \n isSelected = False\n \n # Check if the selected spws are in the subMS\n if set(spwlist) <= set(subms_spws):\n isSelected = True\n \n return isSelected",
"def _profile_contains_subset_list(self, profile: list[set[int]], input_set: set[int], start: int, length: int) \\\n -> bool:\n if length == 0:\n return True\n else:\n for index in range(start, len(profile)):\n if profile[index].issubset(set(input_set)):\n if self._profile_contains_subset_list(profile, profile[index], index + 1, length - 1):\n return True\n return False",
"def is_subset(listA,listB):\n all(item in listA for item in listB)",
"def exclusive_in(in_list,master_list):\n\tif in_list==[]:\n\t\treturn True\n\telse:\n\t\tfor elem in in_list:\n\t\t\tif elem not in master_list:\n\t\t\t\treturn False\n\t\treturn True",
"def is_in_list(list_one, list_two):\n \n for element in list_one:\n if element in list_two:\n return True\n return False",
"def all_in_list (list1, list2):\n return all(map(lambda c: c in list2, list1) )",
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def contains_sublist(lst, sublst):\n for i in range(0, len(lst), 1):\n if sublst == lst[i]:\n return i",
"def IsEveryNodeInTheList(self, list_to_check):\n for node in self.nodes:\n if node.index not in list_to_check:\n return False\n return True",
"def check_list_exists(this_list=[]):\n if isinstance(this_list, list) and len(this_list) > 0:\n return True\n else:\n return False",
"def __isScanContained(self, subms, scanlist, tbin):\n isContained = False \n \n mymsmd = msmdtool()\n mymsmd.open(subms)\n \n # Check if subms scans contain all selected scans\n hasScans = False\n s = mymsmd.scannumbers()\n subms_scans = map(str, s)\n if set(scanlist) <= set(subms_scans):\n hasScans = True\n \n if hasScans:\n t = mymsmd.timesforscans(s)\n mymsmd.close()\n t_range = t.max() - t.min()\n \n if t_range >= tbin: \n isContained = True\n \n return isContained",
"def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])",
"def all(selectors, subitem): #pylint: disable=redefined-builtin\n for sel in selectors:\n if isinstance(sel, list):\n passed = False\n for subsel in sel:\n if subsel(subitem):\n passed = True\n break\n if not passed:\n return False\n elif not sel(subitem):\n return False\n return True",
"def substring_in_list(s, varlist):\n if varlist is None:\n return False\n is_sub = False\n for v in varlist:\n if v in s:\n is_sub = True\n break\n return is_sub",
"def list_should_contain_value(self,list_,value,msg=None):\r\n\r\n default =\"%s contains value '%s'\" %(seq2str(list_),value)\r\n _verify_condition(vlaue not in list_,default,msg)",
"def contains(list, e):\r\n for elem in list:\r\n if elem == e:\r\n return True\r\n return False"
] | [
"0.7168912",
"0.6911516",
"0.689753",
"0.6571266",
"0.6512159",
"0.6431463",
"0.64059114",
"0.63500553",
"0.6314237",
"0.6075374",
"0.6047245",
"0.59738773",
"0.5970836",
"0.5957095",
"0.5953869",
"0.59509546",
"0.58234435",
"0.57280076",
"0.5707694",
"0.5649077",
"0.56412464",
"0.5625276",
"0.5611447",
"0.55839044",
"0.5573908",
"0.555064",
"0.5544975",
"0.55421764",
"0.55197954",
"0.54395515"
] | 0.8294943 | 0 |
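The record above cannot run on its own (it calls `Settings._get_type_to_one_of` and the other private helpers), so here is a rough, primitives-only sketch of the same check — does some candidate list accept every element of |sublist| — under the simplifying assumption that elements are compared with plain equality rather than the wildcard/regex rules shown earlier.

def sublist_in_one_of_lists(sublist, lists):
    # True if at least one candidate list contains every element of `sublist`.
    return any(all(e in candidate for e in sublist) for candidate in lists)

# sublist_in_one_of_lists([1, 2], [[1, 2, 3], ['a', 'b']])    -> True
# sublist_in_one_of_lists([1, 'a'], [[1, 2, 3], ['a', 'b']])  -> False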
return True if dict |d| is in one of the dicts in |dicts|, False otherwise. |dicts| is a list of dictionaries. Legal elements in the dictionaries are the typical primitives (int, float, bool, str), lists, and dicts. | def _is_dict_in_one_of_dicts(d, dicts):
for vd in dicts:
if Settings._is_in_dict(d, vd):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n if not Settings._is_in_list(v, valid_d[k]):\n return False\n elif Settings._is_dict(v):\n if isinstance(valid_d[k], dict):\n if not Settings._is_in_dict(v, valid_d[k]):\n return False\n elif isinstance(valid_d[k], list):\n if not Settings._is_dict_in_one_of_dicts(v, valid_d[k]):\n return False\n else:\n raise InvalidSettingError()\n else:\n raise InvalidSettingError()\n return Settings._has_all_keys_from(d, valid_d)",
"def have(keylist, dic):\n return all(key in dic and dic[key] for key in keylist)",
"def dict_equal(d1: Dict, d2: Dict) -> bool:\n\n # iterate over the dict with more keys\n # di is the dictionary to iterate over\n # dj is the one to compare to\n if len(d2) > len(d1):\n di = d2\n dj = d1\n else:\n di = d1\n dj = d2\n for key, value in di.items():\n # check if key is also in d2 and if the value is the same\n if key not in dj.keys():\n return False\n else:\n value_j = dj[key]\n if type(value) is dict and type(value_j) is dict:\n # if its again a dictionary -> recursion\n if not dict_equal(value, value_j):\n return False\n\n elif type(value) is np.ndarray and type(value_j) is np.ndarray:\n if not np.array_equal(value, value_j):\n return False\n\n # check if both are the same type of object\n elif type(value) is not type(value_j):\n return False\n\n elif value != value_j:\n return False\n\n return True",
"def dict_contains(dct, keys):\n\n assert isinstance(dct, dict), \"dict_contains: dct should be of type dict \"\n assert type(keys) in [int, str, list], \"dict_contains: keys should be of type list or string \"\n if not type(keys) == list:\n keys = [keys]\n\n return contains(dct.keys(), keys)",
"def contains(dictionary, keys):\n if keys is None:\n return True\n keys = keys if isinstance(keys, list) else [keys]\n for key in keys:\n if key not in dictionary:\n return False\n return True",
"def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True",
"def dict_equal(d1, d2):\n if isinstance(d1, dict) and isinstance(d2, dict):\n # check keysets\n if set(d1) != set(d2):\n return False\n\n # otherwise loop through all the keys and check if the dicts and items are equal\n return all((dict_equal(d1[key], d2[key]) for key in d1))\n\n # check equality on other objects\n else:\n return d1 == d2",
"def dict_contains(subdict, maindict):\n try:\n for k, v in subdict.items():\n mainv = maindict[k]\n if isinstance(mainv, dict) and isinstance(v, dict):\n if not dict_contains(v, mainv):\n return False\n elif isinstance(mainv, (set, frozenset)):\n return set(v) == mainv\n elif mainv != v:\n return False\n except KeyError:\n return False\n return True",
"def find_bool(name, *dicts):\n value = False\n\n for d in dicts:\n if type(d) == str:\n value = d == 'True'\n elif name in d:\n value = d[name]\n if type(value) == str:\n value = value == 'True'\n\n if value:\n return True\n\n return False",
"def dictionary_shoule_contain_value(self,dictionary,value,msg=None):\r\n default = \"Dictionary does not contain value '%s'\"%value\r\n _verify_condition(value in dictionary.values(),default,msg)",
"def _dict_contains(dict_a, dict_b):\n return dict_a.items() >= dict_b.items()",
"def dict_has_items(obj, items):\n has_items = False\n if isinstance(obj, basestring):\n obj = json.loads(obj)\n for item in items:\n for lookup_key, lookup_val in item.iteritems():\n if lookup_key in obj and obj[lookup_key] == lookup_val:\n has_items = True\n else:\n return False\n return has_items",
"def do_contains(d, *ks):\n try:\n _ = do_get(d, *ks)\n except KeyError:\n return False\n else:\n return True",
"def compare_dicts(dict1, dict2):\n for k,v in dict2.items():\n if v != dict1[k]:\n return False\n return True",
"def keys_exists(multi_dict: dict, keys: List[str]) -> bool:\n _multi_dict = multi_dict\n for key in keys:\n try:\n _multi_dict = _multi_dict[key]\n except KeyError:\n return False\n return True",
"def validate_dict(in_dict, **kwargs):\n\n if not isinstance(in_dict, dict):\n raise ValueError('requires a dictionary')\n\n for key, value in iteritems(kwargs):\n\n if key == 'required':\n for required_key in value:\n if required_key not in in_dict:\n return False\n\n elif key not in in_dict:\n continue\n\n elif value == bool:\n\n in_dict[key] = (True\n if str(in_dict[key]).lower() == 'true'\n else False)\n\n else:\n\n if (isinstance(in_dict[key], list) and\n len(in_dict[key]) == 1 and\n value != list):\n in_dict[key] = in_dict[key][0]\n\n try:\n if key in in_dict:\n in_dict[key] = value(in_dict[key])\n except ValueError:\n return False\n\n return True",
"def anyMoreThanOne(dict, keys):\n\tfor key in keys:\n\t\tif key in dict and dict[key] > 0:\n\t\t\treturn True\n\treturn False",
"def assertContainsDict(self, dictionary, data):\n for key in dictionary:\n self.assertTrue(key in data, msg=\"Data doesn't have key '{}'\".format(key))\n value = dictionary[key]\n value2 = data[key]\n self.assertEqual(value, value2,\n msg=\"key={}, value={} != target={}\".format(key, value, value2))",
"def isWord(word, dictionary):\n return word in dictionary",
"def checkFieldsMatch(fieldNames, fieldsList, dictList):\n if len(fieldsList) != len(dictList):\n return False\n for d in dictList:\n arow = ()\n for i in range(len(fieldNames)):\n arow += (d[fieldNames[i]], )\n if arow not in fieldsList:\n return False\n fieldsList.remove(arow)\n return True",
"def is_collision(dict_a, dict_b):\n\n intersection = set(dict_a.values()) & set(dict_b.values())\n if not intersection:\n # Empty\n return False\n else:\n # Not Empty\n return True",
"def _find_equivalent(searched_dict, dicts_list):\n for id_key in ('id', 'uid', 'name'):\n # Recognize the ID key used, if any\n local_id = searched_dict.get(id_key)\n if local_id:\n # Found an ID\n for other_item in dicts_list:\n if other_item.get(id_key) == local_id:\n # Found an item with the same ID\n return other_item\n \n # Found nothings\n return None",
"def words_in_dictionary(word_list):\n for word in word_list:\n word = word.lower()\n raw_word = word.replace(\"'\", '').replace('.', '')\n if word not in DICTIONARY_LOWER and raw_word not in DICTIONARY_LOWER:\n return False\n return True",
"def _is_in_doc(t: int, d: List[List[str]]) -> bool:\n t = str(t)\n for s in d:\n if t in s:\n return True\n return False",
"def compare_dicts(dict1, dict2, dict1_name=\"d1\", dict2_name=\"d2\", path=\"\"):\n # Setup paths to track key exploration. The path parameter is used to allow\n # recursive comparisions and track what's being compared.\n result = True\n for key in dict1.keys():\n dict1_path = \"{}{}[{}]\".format(dict1_name, path, key)\n dict2_path = \"{}{}[{}]\".format(dict2_name, path, key)\n if key not in dict2.keys():\n log.debug(\"%s not a valid key in %s.\", dict1_path, dict2_path)\n result = False\n elif isinstance(dict1[key], dict) and isinstance(dict2[key], dict):\n log.debug(\n \"%s and %s contain dictionary. Evaluating.\", dict1_path,\n dict2_path\n )\n result = compare_dicts(\n dict1[key], dict2[key], dict1_name, dict2_name,\n path=\"[{}]\".format(key)\n )\n elif isinstance(dict1[key], list) and isinstance(dict2[key], list):\n log.debug(\n \"%s and %s key '%s' contains list. Validating dict1 items \"\n \"exist in dict2.\", dict1_path, dict2_path, key\n )\n if not all([bool(item in dict2[key]) for item in dict1[key]]):\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key]\n )\n result = False\n # Hack for NetBox v2.6.7 requiring integers for some values\n elif key in [\"status\", \"type\"]:\n if dict1[key] != dict2[key][\"value\"]:\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key][\"value\"]\n )\n result = False\n elif dict1[key] != dict2[key]:\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key]\n )\n # Allow the modification of device sites by ignoring the value\n if \"site\" in path and key == \"name\":\n log.debug(\"Site mismatch is allowed. Moving on.\")\n else:\n result = False\n if result:\n log.debug(\"%s and %s values match.\", dict1_path, dict2_path)\n else:\n log.debug(\"%s and %s values do not match.\", dict1_path, dict2_path)\n return result\n log.debug(\"Final dictionary compare result: %s\", result)\n return result",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def equivalent_dicts(_a, _b):\n for _key in _a.keys():\n if _a[_key] != _b[_key]:\n return False\n return True",
"def isInDic(dic, key):\n pass",
"def intersect(*d):\n sets = iter(map(set, d))\n result = sets.next()\n for s in sets:\n result = result.intersection(s)\n return result",
"def match_two_dicts(small_dict, big_dict):\n big_dict_keys = big_dict.keys()\n for key in small_dict.keys():\n if key not in big_dict_keys:\n raise KeyError(\"Wrong argument name '%s'\" % key)\n return True"
] | [
"0.64084905",
"0.60281664",
"0.5984001",
"0.5980985",
"0.59457546",
"0.5872692",
"0.57876366",
"0.57641065",
"0.5747921",
"0.57217395",
"0.5614708",
"0.5484641",
"0.54599655",
"0.53625906",
"0.5358206",
"0.5351264",
"0.533104",
"0.53166837",
"0.5306886",
"0.5287404",
"0.5281739",
"0.52534944",
"0.5161521",
"0.51466405",
"0.5126394",
"0.51215076",
"0.5119926",
"0.50937855",
"0.5088963",
"0.50753355"
] | 0.8241347 | 0 |
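
The row above pairs the docstring for Settings._is_dict_in_one_of_dicts with its implementation, which simply delegates to Settings._is_in_dict for each candidate dict. A minimal standalone sketch of that behaviour, assuming a simplified membership test in place of the real _is_in_dict (the original also accepts lists of allowed values and nested structures):

def _is_in_dict_simplified(d, valid_d):
    # Simplified stand-in: same key set, and each value equal to the valid value.
    return set(d) == set(valid_d) and all(d[k] == valid_d[k] for k in d)

def _is_dict_in_one_of_dicts(d, dicts):
    # True as soon as one candidate dict accepts |d|, False if none do.
    for vd in dicts:
        if _is_in_dict_simplified(d, vd):
            return True
    return False

valid_dicts = [{"mode": "fast", "retries": 1}, {"mode": "safe", "retries": 3}]
print(_is_dict_in_one_of_dicts({"mode": "safe", "retries": 3}, valid_dicts))  # True
print(_is_dict_in_one_of_dicts({"mode": "safe", "retries": 9}, valid_dicts))  # False
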
return True if all elements in list |l| are in one of the lists contained in |valid_l|, False otherwise. Legal elements in the lists are the typical primitives (int, float, bool, str), lists, and dicts. | def _is_in_list(l, valid_l):
for elem in l:
if Settings._is_primitive(elem):
if not Settings._is_in_prim(elem, valid_l):
return False
elif Settings._is_list(elem):
valid_lists = [l for l in valid_l if isinstance(l, list)]
if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):
return False
elif Settings._is_dict(elem):
valid_dicts = [d for d in valid_l if isinstance(d, dict)]
if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):
return False
else:
raise InvalidSettingError()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def allIn(listA: Union[int, List[int]], listB: Union[int, List[int]]) -> bool:\n if isinstance(listA, int):\n listA = [listA]\n if isinstance(listB, int):\n return listB in listA\n else:\n for item in listB:\n if item not in listA:\n return False\n return True",
"def compare(self,l):\r\n\t\t\r\n\t\t# assume equality\r\n\t\tq = True\r\n\t\t\r\n\t\t# test term by term\r\n\t\tfor i,j in zip(self,l):\r\n\t\t\t\r\n\t\t\t# break at first mismatch\r\n\t\t\tif not i.compare(j):\r\n\t\t\t\tq = False\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t# make sure lengths are also equal\r\n\t\tif len(self) != len(l):\r\n\t\t\tq = False\r\n\t\t\t\t\r\n\t\treturn q",
"def all_lists(lst):\n\n for item in lst:\n if not isinstance(item, lst):\n return False\n\n return True\n\n # Alternate possibilities: use all() with a generator comprehension,\n # though that isn't something we've covered yet:\n #\n # return all(isinstance(item, list) for item in lst)",
"def oneof(item_list, items):\n for i in item_list:\n if type(i) == type(list()) or type(i) == type(dict()):\n if sublist_in(item_list, i):\n return True\n else:\n if i in items: return True\n\n return False",
"def IsValidInputType(self, list_of_matches):\n for entry in list_of_matches:\n if not entry:\n return False\n\n return True",
"def is_lili_subset(sub_lili, full_lili):\n if len(sub_lili) != len(full_lili):\n warnings.warn(\"Inputs should have same length\")\n for i, li in enumerate(sub_lili):\n if len(li) > 0 and not set(li).issubset(set(full_lili[i])):\n return False\n return True",
"def validate_loans(loans):\n def validate_loan(loan):\n return (type(loan)==list or type(loan)==tuple) and len(loan)==3 \\\n and type(loan[0])==str and type(loan[1])==str and loan[1] in database.LOANABLE_RESOURCES and type(loan[2])==int and loan[2]>=0\n return type(loans)==list and False not in [validate_loan(load) for loan in loans]",
"def contains_all(self, *items):\n return all(item in self for item in items)",
"def _listContains(self, l, entry):\n for i in range(0, len(l)):\n if l[i] == entry:\n return True\n return False",
"def allin(list1, list2):\n for rule1 in list1:\n literals1 = [literal for literal in rule1]\n for rule2 in list2:\n literals2 = [literal for literal in rule2]\n if literals1 != literals2:\n # If there is one rule different, then is not a sublist\n return False\n return True",
"def _is_sublist_in_one_of_lists(sublist, lists):\n\n type_to_one_of = Settings._get_type_to_one_of()\n\n for vl in lists:\n next_vl = False\n for e in sublist:\n if Settings._is_primitive(e):\n t = 'primitive'\n elif Settings._is_list(e):\n vl = [l for l in vl if isinstance(l, list)]\n t = 'list'\n elif Settings._is_dict(e):\n vl = [d for d in vl if isinstance(d, dict)]\n t = 'dict'\n else:\n raise InvalidSettingError()\n\n if not type_to_one_of[t](e, vl):\n next_vl = True\n break\n\n if next_vl:\n continue\n return True\n return False",
"def all_in_list (list1, list2):\n return all(map(lambda c: c in list2, list1) )",
"def list_check(lst):\n for item in lst:\n if type(item) != list:\n return False\n return True",
"def isValidTypeForList(self, *args):\n return _libsbml.SBasePlugin_isValidTypeForList(self, *args)",
"def validate_list(validators, data):\n if type(data) is not list:\n return False\n n_validators = len(validators)\n if n_validators == 0:\n return len(data) == 0\n elif n_validators == 1:\n validator = validators[0]\n return all(imap(lambda item: validate_common(validator, item), data))\n elif n_validators > 1:\n raise NotImplementedError(\"You cannot specify more than one validator for list at the moment.\")",
"def check_list_exists(this_list=[]):\n if isinstance(this_list, list) and len(this_list) > 0:\n return True\n else:\n return False",
"def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))",
"def IsEveryNodeInTheList(self, list_to_check):\n for node in self.nodes:\n if node.index not in list_to_check:\n return False\n return True",
"def assert_all_lists_mutally_exclusive(list_of_lists):\n for idx, list1 in enumerate((list_of_lists)):\n for list2 in list_of_lists[idx + 1:]:\n if any(elem in list2 for elem in list1):\n raise ValueError(\n 'found matching items between two lists: \\n {}\\n {}'.format(\n ', '.join(list1),\n ', '.join(list2),\n ))",
"def is_list_of_list(self) -> bool:\n return bool(AnnotationWrapper.list_of_list_re.match(self.data))",
"def check_inputs(self, inputs):\n if self.debug:\n print(\"Checking inputs\")\n result = True\n for _input in inputs:\n if \"word_\" in _input and inputs[_input] == \"\":\n result = False\n elif \"idiom_\" in _input and inputs[_input] == \"\":\n if \"list\" not in _input:\n result = False\n return result",
"def is_superlist(self, list_of_setlists, compared_setlist):\n matches = list(filter(\n lambda x: x,\n [\n set(l[i]).issuperset(set(compared_setlist[i]))\n if i < len(compared_setlist) else False\n for l in list_of_setlists\n for i in range(len(l))\n ]\n ))\n return any(matches)",
"def isInList(valid_positions, pos):\n assert isinstance(pos, Position)\n for position in valid_positions:\n if pos.compare(position):\n return position\n return False",
"def empty_list(input_list):\n for item in input_list:\n if not isinstance(item, list) or not empty_list(item):\n return False\n return True",
"def contains(list_, filter_):\n for x in list_:\n if filter_(x):\n return True\n return False",
"def is_valid_integer_list(any_list):\n list_object = json.loads(any_list)\n return not any(not is_valid_integer(str(listing_id)) for listing_id in\n list_object)",
"def uniqueCheckSet(aList):\r\n check = set()\r\n for v in aList:\r\n if v in check:\r\n return True\r\n check.add(v)\r\n return False",
"def validate(s):\n\n # base case: square is 1 or 0\n if s == 1 or s == 0:\n return True\n\n # list of length 4\n if isinstance(s, list) and len(s) == 4:\n\n # idea one: fail fast\n for i in s:\n if not validate(i):\n return False\n return True\n\n # idea 2: \"and\" the results ALSO fail fast\n # return (validate(s[0]) and \n # validate(s[1]) and \n # validate(s[2]) and \n # validate(s[3]))\n # OR\n # return all([validate(i) for i in s])\n\n # idea 3: multiply the results: will not return boolean\n # return (validate(s[0]) * \n # validate(s[1]) * \n # validate(s[2]) * \n # validate(s[3]))\n\n # not one of our numbers or list of length 4\n # another base case\n return False",
"def sublist_in(lst, sublst):\n for i in sublst:\n if i not in lst:\n return False\n return True"
] | [
"0.7000915",
"0.6149883",
"0.61312413",
"0.6082698",
"0.59904224",
"0.5970148",
"0.5934717",
"0.5885762",
"0.5882798",
"0.5851436",
"0.5785513",
"0.5773051",
"0.57016706",
"0.5689442",
"0.56725544",
"0.5652237",
"0.5626126",
"0.56227636",
"0.5618623",
"0.56080157",
"0.5601694",
"0.5578278",
"0.55532265",
"0.5537523",
"0.5531834",
"0.55284375",
"0.55185485",
"0.5508726",
"0.54877746",
"0.5486937"
] | 0.7949676 | 0 |
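
The row above documents Settings._is_in_list, which dispatches on the element type (primitive, list, dict) and checks each element against the allowed values in valid_l. A minimal sketch of the primitive-only case, assuming the real _is_in_prim reduces to a plain membership test:

def _is_in_list_primitives(l, valid_l):
    # Every primitive element must appear among the allowed values.
    return all(elem in valid_l for elem in l)

print(_is_in_list_primitives(["png", "jpg"], ["png", "jpg", "gif"]))  # True
print(_is_in_list_primitives(["png", "bmp"], ["png", "jpg", "gif"]))  # False
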
return True if dict |d| has all keys in dict |valid_d|. False otherwise. | def _has_all_keys_from(d, valid_d):
for k, v in valid_d.items():
if k not in d:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n if not Settings._is_in_list(v, valid_d[k]):\n return False\n elif Settings._is_dict(v):\n if isinstance(valid_d[k], dict):\n if not Settings._is_in_dict(v, valid_d[k]):\n return False\n elif isinstance(valid_d[k], list):\n if not Settings._is_dict_in_one_of_dicts(v, valid_d[k]):\n return False\n else:\n raise InvalidSettingError()\n else:\n raise InvalidSettingError()\n return Settings._has_all_keys_from(d, valid_d)",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def check_keys_in_dict(dictionary, keys):\n if not all(key in dictionary for key in keys):\n raise KeyError(\"Dictionary missing key values.\"\n \"Requires: {}\".format(keys))\n return True",
"def has_keys(self) -> bool:\n \n for key, value in self.key_satified.items():\n if value is not True:\n return False\n return True",
"def all_false(d):\n # false_count should be the same as the length of d\n false_count = sum(v is False for v in d.values())\n return false_count == len(d)",
"def have(keylist, dic):\n return all(key in dic and dic[key] for key in keylist)",
"def valid_compatible_data(compatible_data):\n if not isinstance(compatible_data, dict):\n return False\n if set(compatible_data.keys()) != compatible_data_keys_set:\n return False\n for key in compatible_data:\n boolean = (compatible_data[key] is True or\n compatible_data[key] is False)\n if not boolean:\n return False\n return True",
"def all_keys_not_none(d: dict, required: list):\n passed = 0\n for r in required:\n v = d.get(r)\n if v is not None:\n passed += 1\n\n return len(required) == passed",
"def is_in(cls, hierarchical_dict: dict, key: str) -> bool:\n return key in cls.get_all_keys(hierarchical_dict)",
"def _is_dict_in_one_of_dicts(d, dicts):\n\n for vd in dicts:\n if Settings._is_in_dict(d, vd):\n return True\n return False",
"def any_keys_not_none(d: dict, required: list):\n passed = 0\n for r in required:\n v = d.get(r)\n if v is not None:\n passed += 1\n\n if len(required) == 1 and passed == len(required): # Exclusion for sequence with 1 element\n return True\n\n return 0 < passed < len(required)",
"def _is_valid(key):\n is_valid = False\n for valid_key in VALID_KEYS:\n if valid_key in key:\n is_valid = True\n for invalid_key in INVALID_KEYS:\n if invalid_key in key:\n is_valid = False\n return is_valid",
"def is_conflicting_keys(cls, d1, d2):\n return bool(set(d1.keys()).intersection(set(d2.keys())))",
"def keys_exists(multi_dict: dict, keys: List[str]) -> bool:\n _multi_dict = multi_dict\n for key in keys:\n try:\n _multi_dict = _multi_dict[key]\n except KeyError:\n return False\n return True",
"def contains(dictionary, keys):\n if keys is None:\n return True\n keys = keys if isinstance(keys, list) else [keys]\n for key in keys:\n if key not in dictionary:\n return False\n return True",
"def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. \n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid",
"def validate_dict(in_dict, **kwargs):\n\n if not isinstance(in_dict, dict):\n raise ValueError('requires a dictionary')\n\n for key, value in iteritems(kwargs):\n\n if key == 'required':\n for required_key in value:\n if required_key not in in_dict:\n return False\n\n elif key not in in_dict:\n continue\n\n elif value == bool:\n\n in_dict[key] = (True\n if str(in_dict[key]).lower() == 'true'\n else False)\n\n else:\n\n if (isinstance(in_dict[key], list) and\n len(in_dict[key]) == 1 and\n value != list):\n in_dict[key] = in_dict[key][0]\n\n try:\n if key in in_dict:\n in_dict[key] = value(in_dict[key])\n except ValueError:\n return False\n\n return True",
"def _key_check(self, key_list, chk_dict=None):\n exists = False\n if chk_dict is None:\n chk_dict = self._e_dict\n for key in key_list:\n exists = key in chk_dict.keys()\n if exists:\n chk_dict = chk_dict[key]\n else:\n break\n return exists",
"def valid(self) -> bool:\n are_populated = [bool(getattr(self, fld_nm)) for fld_nm in self.necessary_fields]\n return all(are_populated)",
"def check_condition(self, query_dict):\n return all(key in self.__data and self.__data[key] == value\n for key, value in query_dict.items())",
"def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))",
"def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")",
"def dict_contains(dct, keys):\n\n assert isinstance(dct, dict), \"dict_contains: dct should be of type dict \"\n assert type(keys) in [int, str, list], \"dict_contains: keys should be of type list or string \"\n if not type(keys) == list:\n keys = [keys]\n\n return contains(dct.keys(), keys)",
"def is_valid(self):\n return self.has_valid_values() and self.has_valid_sum()",
"def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False",
"def key_exists(key, dictionary):\n return key in dictionary and dictionary[key] is not None",
"def check(actual_dict, raise_error=True):\r\n missing = set(expected_keys) - set(actual_dict.keys())\r\n if not missing:\r\n return True\r\n if raise_error:\r\n raise InvalidTabsException(\r\n \"Expected keys '{0}' are not present in the given dict: {1}\".format(expected_keys, actual_dict)\r\n )\r\n else:\r\n return False",
"def anyMoreThanOne(dict, keys):\n\tfor key in keys:\n\t\tif key in dict and dict[key] > 0:\n\t\t\treturn True\n\treturn False",
"def has_key(self, key):\n return key in self",
"def do_contains(d, *ks):\n try:\n _ = do_get(d, *ks)\n except KeyError:\n return False\n else:\n return True"
] | [
"0.7827297",
"0.680757",
"0.66193026",
"0.6566625",
"0.6357033",
"0.62774754",
"0.6261642",
"0.6259637",
"0.6217635",
"0.62153924",
"0.6203975",
"0.6185486",
"0.6110884",
"0.6004363",
"0.5946864",
"0.58883274",
"0.57816666",
"0.5731112",
"0.5690174",
"0.56880385",
"0.56877536",
"0.5647569",
"0.56077594",
"0.56066304",
"0.55876696",
"0.55819",
"0.55668527",
"0.5566615",
"0.5556735",
"0.55467147"
] | 0.87729234 | 0 |
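
Settings._has_all_keys_from, shown in the row above, only inspects keys: every key of the valid template must be present in the candidate dict. Extra keys in |d| are not rejected here; those are caught by the key check inside _is_in_dict. A short usage sketch with hypothetical setting names:

def _has_all_keys_from(d, valid_d):
    # True when every key defined in the valid template is present in d.
    for k in valid_d:
        if k not in d:
            return False
    return True

template = {"host": None, "port": None}
print(_has_all_keys_from({"host": "localhost", "port": 8080}, template))  # True
print(_has_all_keys_from({"host": "localhost"}, template))                # False
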
return True if all dict |d| keys are in dict |valid_d|, values in |d| are legal values with respect to the valid values defined in |valid_d|, and all |valid_d| keys are in |d|. Values in |d| are determined legal based on Settings._is_in_prim(), Settings._is_list(), or recursively Settings._is_in_dict(). False otherwise. | def _is_in_dict(d, valid_d):
for k, v in d.items():
if k not in valid_d:
return False
else:
if Settings._is_primitive(v):
if not Settings._is_in_prim(v, valid_d[k]):
return False
elif Settings._is_list(v):
if not Settings._is_in_list(v, valid_d[k]):
return False
elif Settings._is_dict(v):
if isinstance(valid_d[k], dict):
if not Settings._is_in_dict(v, valid_d[k]):
return False
elif isinstance(valid_d[k], list):
if not Settings._is_dict_in_one_of_dicts(v, valid_d[k]):
return False
else:
raise InvalidSettingError()
else:
raise InvalidSettingError()
return Settings._has_all_keys_from(d, valid_d) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def _has_all_keys_from(d, valid_d):\n\n for k, v in valid_d.items():\n if k not in d:\n return False\n return True",
"def _is_dict_in_one_of_dicts(d, dicts):\n\n for vd in dicts:\n if Settings._is_in_dict(d, vd):\n return True\n return False",
"def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)",
"def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. \n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid",
"def validate_dict(in_dict, **kwargs):\n\n if not isinstance(in_dict, dict):\n raise ValueError('requires a dictionary')\n\n for key, value in iteritems(kwargs):\n\n if key == 'required':\n for required_key in value:\n if required_key not in in_dict:\n return False\n\n elif key not in in_dict:\n continue\n\n elif value == bool:\n\n in_dict[key] = (True\n if str(in_dict[key]).lower() == 'true'\n else False)\n\n else:\n\n if (isinstance(in_dict[key], list) and\n len(in_dict[key]) == 1 and\n value != list):\n in_dict[key] = in_dict[key][0]\n\n try:\n if key in in_dict:\n in_dict[key] = value(in_dict[key])\n except ValueError:\n return False\n\n return True",
"def valid_compatible_data(compatible_data):\n if not isinstance(compatible_data, dict):\n return False\n if set(compatible_data.keys()) != compatible_data_keys_set:\n return False\n for key in compatible_data:\n boolean = (compatible_data[key] is True or\n compatible_data[key] is False)\n if not boolean:\n return False\n return True",
"def is_in(cls, hierarchical_dict: dict, key: str) -> bool:\n return key in cls.get_all_keys(hierarchical_dict)",
"def _is_valid(key):\n is_valid = False\n for valid_key in VALID_KEYS:\n if valid_key in key:\n is_valid = True\n for invalid_key in INVALID_KEYS:\n if invalid_key in key:\n is_valid = False\n return is_valid",
"def check_keys_in_dict(dictionary, keys):\n if not all(key in dictionary for key in keys):\n raise KeyError(\"Dictionary missing key values.\"\n \"Requires: {}\".format(keys))\n return True",
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):\n return False\n elif Settings._is_dict(elem):\n valid_dicts = [d for d in valid_l if isinstance(d, dict)]\n if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):\n return False\n else:\n raise InvalidSettingError()\n return True",
"def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True",
"def have(keylist, dic):\n return all(key in dic and dic[key] for key in keylist)",
"def is_valid(self):\n return self.has_valid_values() and self.has_valid_sum()",
"def _hasValuesChecker(entity, params):\n \n for key, values in constraints.iteritems():\n if entity.__getattribute__(key) not in values:\n return False\n\n return True",
"def all_false(d):\n # false_count should be the same as the length of d\n false_count = sum(v is False for v in d.values())\n return false_count == len(d)",
"def _validate_values(self, sample):\n result = True\n paths = []\n #Search vor necessary paths accorduing to comparison_style\n if self._comparison_style == ComparisonStyle.minimum:\n paths = self._find_all_paths(self._reference)\n else:\n paths = self._find_all_paths(sample)\n # For every path, if it is endling in an key, validate the key\n for path in paths:\n reference_value = MappingValidator._get_value(self._reference,\n list(path))\n mapping_value = MappingValidator._get_value(sample, list(path))\n if isinstance(mapping_value, abc.Mapping):\n continue\n elif isinstance(reference_value, type):\n result = result and isinstance(mapping_value, reference_value)\n elif callable(reference_value):\n result = result and bool(reference_value(mapping_value))\n elif isinstance(reference_value, re._pattern_type):\n result = result and bool(reference_value.match(mapping_value))\n elif isinstance(reference_value, list):\n list_contains_sample_val = False\n for possibility in reference_value:\n if possibility == mapping_value:\n list_contains_sample_val = True\n break\n result = result and list_contains_sample_val\n elif reference_value is Ellipsis:\n result = result and True\n else:\n result = result and False\n if not result:\n break\n return result",
"def is_valid_value(self, value):\n return value in self.values",
"def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")",
"def validate(self):\n validated = True \n # Check that all parameters exist in the self.parameters dictionary\n for param_name in self._SCALAR_PARAMETERS:\n if param_name not in self.parameters:\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False \n \n for param_name in self._TABLE_PARAMETERS:\n if not all([elem for elem in self.parameters[param_name]]):\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False\n \n return validated",
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError('unknown keys in values')",
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError, 'unknown keys in values'",
"def contains(dictionary, keys):\n if keys is None:\n return True\n keys = keys if isinstance(keys, list) else [keys]\n for key in keys:\n if key not in dictionary:\n return False\n return True",
"def is_valid(self):\n for location in self.locations.values():\n if not location.is_valid:\n return False\n return True",
"def _validate_dict_entry(self, dict_entry):\r\n try:\r\n # Type-check all of the type-critical items.\r\n if (\r\n type(dict_entry[\"id\"]) == int and\r\n type(dict_entry[\"date\"]) == datetime.date and\r\n type(dict_entry[\"time\"]) == datetime.time and\r\n type(dict_entry[\"datetime\"]) == datetime.datetime and\r\n type(dict_entry[\"duration\"]) == datetime.timedelta):\r\n return True\r\n else:\r\n return False\r\n # end if\r\n except Exception as err:\r\n _z_exc(\"logentry.py/_validate_dict_entry\", err)\r\n # end try\r",
"def has_keys(self) -> bool:\n \n for key, value in self.key_satified.items():\n if value is not True:\n return False\n return True",
"def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in sum_prob_per_var.values())",
"def check_condition(self, query_dict):\n return all(key in self.__data and self.__data[key] == value\n for key, value in query_dict.items())",
"def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay",
"def is_valid_query(query: Dict[str, Any]) -> bool:\n for name, value in query.items():\n if is_illegal_surrogate(name) or is_illegal_surrogate(value):\n return False\n return True"
] | [
"0.80089605",
"0.79015756",
"0.68252265",
"0.66869044",
"0.6565693",
"0.6309288",
"0.6258104",
"0.60240465",
"0.5989314",
"0.5946436",
"0.5847185",
"0.577302",
"0.57671636",
"0.56919193",
"0.56556475",
"0.56521857",
"0.56458354",
"0.5585945",
"0.5582982",
"0.55797684",
"0.557541",
"0.55745274",
"0.5574059",
"0.5564927",
"0.55628663",
"0.5560761",
"0.55222344",
"0.5502098",
"0.5499082",
"0.54920614"
] | 0.87256515 | 0 |
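
The row above shows the central recursive checker, Settings._is_in_dict: unknown keys fail immediately, each value is validated according to its type, and _has_all_keys_from finally enforces that no required key is missing. A reduced sketch covering only primitives and nested dicts (the original also handles lists and lists of candidate dicts), assuming allowed primitive values are given as a list:

def _is_in_dict_sketch(d, valid_d):
    for k, v in d.items():
        if k not in valid_d:
            return False                      # unknown setting name
        if isinstance(v, dict):
            if not _is_in_dict_sketch(v, valid_d[k]):
                return False                  # recurse into nested settings
        elif v not in valid_d[k]:
            return False                      # primitive not among allowed values
    return all(k in d for k in valid_d)       # no required key may be missing

valid = {"log": {"level": ["debug", "info"], "color": [True, False]}}
print(_is_in_dict_sketch({"log": {"level": "info", "color": True}}, valid))   # True
print(_is_in_dict_sketch({"log": {"level": "trace", "color": True}}, valid))  # False
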
raise InvalidSettingError if primitive (int, float, bool, str) value |v| is not in list |valid_v| | def _primitive_validity_check(v, valid_v):
if not Settings._is_in_prim(v, valid_v):
raise InvalidSettingError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )",
"def _check_valid_value(self, value):\n if self._possible_values is None: # validation not defined (profile)\n return\n if value in self._possible_values:\n return\n if value is not None and \"ANY\" in self._possible_values:\n return\n msg = (\"'%s' is not a valid 'options.%s' value.\\nPossible values are %s\"\n % (value, self._name, self._possible_values))\n raise ConanException(msg)",
"def _validate_init_control(self, v):\n if v is None or v == 0:\n return v\n if len(v) == 2:\n return tuple([float(x) for x in v])",
"def __allowed_values_incorrect_list_with_allowed_values(self):\n strTestName = 'Value NaN given in a list with allowed values (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 2D')\n RxCSObject.paramAllowed('parameter1', range(int(2e3)) + [np.NaN])\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e1))\n\n self.__parametersCheck_error(RxCSObject, ValueError, strTestName)",
"def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)",
"def validate_settings(self, settings):\n pass",
"def validate(cls, v):\n return v",
"def _check_allowed_values(self, key: str, value: Any):\n allowedValues = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedValues\", None)\n if allowedValues is not None and value not in allowedValues:\n raise Exception(\n f\"Value '{value}' is not an allowed value for '{key}'. Allowed values are: {', '.join(allowedValues)}\"\n )",
"def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_invalid = self.invalid_values\n if value in current_invalid:\n raise self.in_value_error(self.in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_invalid)))",
"def __allowed_values_inccorrect_number(self):\n strTestName = 'Values of a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 1.4\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)",
"def is_valid(self, value):\r\n pass",
"def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)",
"def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)",
"def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with pytest.raises(InputError):\n _check_input_config({key: test_value})",
"def clean(self, **kwargs):\n super().clean()\n\n # Encode as native values\n if self.is_int():\n self.value = self.as_int()\n\n elif self.is_bool():\n self.value = self.as_bool()\n\n validator = self.__class__.get_setting_validator(self.key, **kwargs)\n\n if validator is not None:\n self.run_validator(validator)\n\n options = self.valid_options()\n\n if options and self.value not in options:\n raise ValidationError(_(\"Chosen value is not a valid option\"))",
"def check_supported(check_val_list, valid_meas_dic):\r\r\n invalid_list = []\r\r\n\r\r\n for val in check_val_list:\r\r\n try:\r\r\n dummy = valid_meas_dic[val]\r\r\n except KeyError:\r\r\n invalid_list.append(val)\r\r\n\r\r\n if invalid_list:\r\r\n errMsg = (\"The following is unsupported %s\" %invalid_list)\r\r\n errMsg = errMsg + (\"\\nThe list of valid values is %s\" %valid_meas_dic.keys())\r\r\n raise ExGeneral(errMsg)",
"def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_validate_bad_data(self, value):\n opt = scheme.ListOption('test-opt')\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True",
"def validate_settings(_cfg, _ctx):\n pass",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def __allowed_values_incorrect_vector(self):\n strTestName = 'Values of a Numpy Array 1D (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 1D')\n RxCSObject.paramAllowed('parameter1', range(int(1e4)))\n\n vA = np.random.randint(1, 1e3, 1e3)\n vA[vA.size - 1] = 2e4\n RxCSObject.parameter1 = vA\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)",
"def value_constraint(self, node, value, allowed):\n self.constraint(node, value in allowed,\n 'Invalid value \"%s\" for \"standalone\"! Must be one of %s.' % (value, str(allowed)))",
"def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)",
"def check_value(self, value):",
"def test_validation_modes(member, set_values, values, raising_values):\n\n class MemberTest(Atom):\n m = member\n\n tester = MemberTest()\n for sv, v in zip(set_values, values):\n tester.m = sv\n assert tester.m == v\n\n for rv in raising_values:\n with pytest.raises(\n OverflowError\n if (isinstance(member, Int) and isinstance(rv, float) and rv > 2**32)\n else ValueError\n if isinstance(member, Enum)\n else TypeError\n ):\n tester.m = rv",
"def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")",
"def test__validate_status__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_status(input_value)",
"def validate_values(self):\n if self.avp_def.has_defined_values():\n defined_values = dict(self.avp_def.attr_defined_values)\n if self.avp_value not in defined_values.values():\n raise ValueError(\n f\"{self.avp_def.attr_name} - value {self.avp_value} \"\n \"is not allowed\")\n\n return True"
] | [
"0.6487991",
"0.62959874",
"0.6258109",
"0.6215541",
"0.618264",
"0.61013937",
"0.6074505",
"0.60655147",
"0.60485035",
"0.6044406",
"0.6028974",
"0.6027474",
"0.6026257",
"0.60188854",
"0.59915787",
"0.59838104",
"0.59746593",
"0.59627825",
"0.5954613",
"0.5924983",
"0.5924046",
"0.5918102",
"0.59146553",
"0.5907738",
"0.5893869",
"0.58836544",
"0.5848853",
"0.5839311",
"0.58384377",
"0.578218"
] | 0.82608044 | 0 |
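
The row above wraps the membership test in an exception: instead of returning False, _primitive_validity_check raises InvalidSettingError. A usage sketch, assuming a plain Exception subclass as a stand-in for the project's InvalidSettingError (whose definition is not shown here) and a bare membership test in place of _is_in_prim:

class InvalidSettingError(Exception):
    # Stand-in for the project's exception type.
    pass

def _primitive_validity_check(v, valid_v):
    if v not in valid_v:
        raise InvalidSettingError()

_primitive_validity_check("info", ["debug", "info", "warning"])      # passes silently
try:
    _primitive_validity_check("trace", ["debug", "info", "warning"])
except InvalidSettingError:
    print("rejected invalid setting value")
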
raise InvalidSettingError if list |l| is not in list |valid_l| where \"in\" semantics are aligned with Settings._is_in_list(), so see the doc for that | def _list_validity_check(l, valid_l):
if not Settings._is_in_list(l, valid_l):
raise InvalidSettingError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):\n return False\n elif Settings._is_dict(elem):\n valid_dicts = [d for d in valid_l if isinstance(d, dict)]\n if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):\n return False\n else:\n raise InvalidSettingError()\n return True",
"def validate_list(self, field: str, valid_options: List[str]):\n val = getattr(self, field)\n if isinstance(val, list):\n for v in val:\n if v not in valid_options:\n raise ConfigError(f'{v} is not a valid option for {field}')\n else:\n if val not in valid_options:\n raise ConfigError(f'{val} is not a valid option for {field}')",
"def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_valid = self.valid_values\n if value not in current_valid:\n raise self.not_in_value_error(self.not_in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_valid)))",
"def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_invalid = self.invalid_values\n if value in current_invalid:\n raise self.in_value_error(self.in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_invalid)))",
"def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)",
"def set_in(self, val):\n if not contain_in_list_equal(val, PARAM_INS):\n raise ArgumentError(\"[WARNING] `in`, should be \" + \", \".join(PARAM_INS))\n self._in = val\n pass",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def set_list(section, option, l):\n\ts = escape_join(\",\", l)\n\n\tif None == s:\n\t\treturn False\n\n\treturn set(section, option, s)",
"def list_should_contain_value(self,list_,value,msg=None):\r\n\r\n default =\"%s contains value '%s'\" %(seq2str(list_),value)\r\n _verify_condition(vlaue not in list_,default,msg)",
"def _check_keys(setting_dict):\n for key in SettingContainer.key_list:\n if not key in setting_dict:\n raise Exception(\n f\"No value for {key} found in language-settings\")",
"def validate_settings(_cfg, _ctx):\n pass",
"def assertInList(value, values, msg):\n\tassert value in values, msg",
"def validate_settings(self, settings):\n pass",
"def check_invalid_items(**kwargs: Tuple[T, Iterable[T]]):\n for key, (value, possible) in kwargs.items():\n possible = set(possible)\n if value not in possible:\n raise ValueError(f\"{key}={value} is not in: {possible}\")",
"def is_in_list(item, list_, kind):\n if item not in list_:\n raise KeyError(f'Specify {kind} from {list_}: got {item}')\n return True",
"def validateListValue(self, list_name, list_value):\n try:\n con = self.getMetadataDatabaseConnection()\n results = 0\n results = con.cursor().callproc('qiime_assets.validate_list_value', [list_name, list_value, results])\n return results[2]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def validatesettings(self, eventlist=None):\n if (eventlist == None):\n eventlist = EventList()\n #\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks, False, \"no directory will be scanned for site-specific extensions.\")\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_controllerroot, False, \"no site-default specified for controller root.\")\n # required stuff\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_siteurl_relative, True, \"site has no relative url specified; assumed to start at root (/).\")\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_siteurl_absolute, True, \"site has no absolute url address.\")\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_sitefilepath, True, \"site has no filepath specified for it's home directory.\")\n\n # return events encountered\n return eventlist",
"def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")",
"def verify_settings(settings):\r\n\r\n if 'limit' not in settings:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit' option must be defined!\")\r\n return False\r\n\r\n if type(settings['limit']) != int:\r\n iridium_manager_tracer.warning(\"Settings: 'limit' must be an int!\")\r\n return False\r\n\r\n if 'limit_interval' not in settings:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' option must be defined!\")\r\n return False\r\n\r\n if type(settings['limit_interval']) != str:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' must be an str!\")\r\n return False\r\n\r\n # Force limit interval setting to always be lower case\r\n settings['limit_interval'] = settings['limit_interval'].lower()\r\n\r\n values = ''\r\n for item in TimeIntervals:\r\n if settings['limit_interval'] == item['name']:\r\n break\r\n values += item['name'] + ', '\r\n else:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' must be one of the following: %s\", values)\r\n return False\r\n\r\n return True",
"def in_list(value, arg):\r\n return value in arg",
"def test_str_in_str_list(self):\n # compact ver sion (env variables)\n assert_that(Condition.is_valid(\n '\"{{ env.BRANCH_NAME }}\" in [\"dev\", \"prod\"]'), equal_to(True))\n # more spaces around are allowed (env variables)\n assert_that(Condition.is_valid(\n ' \"{{ env.BRANCH_NAME }}\" in [ \"dev\", \"prod\" ] '), equal_to(True))\n # compact version (task variables)\n assert_that(Condition.is_valid(\n '\"{{ variables.cpu_count }}\" in [\"1\", \"2\"]'), equal_to(True))",
"def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)",
"def __allowed_values_incorrect_list_with_allowed_values(self):\n strTestName = 'Value NaN given in a list with allowed values (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 2D')\n RxCSObject.paramAllowed('parameter1', range(int(2e3)) + [np.NaN])\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e1))\n\n self.__parametersCheck_error(RxCSObject, ValueError, strTestName)",
"def check_for_list(check):",
"def _check_valid_value(self, value):\n if self._possible_values is None: # validation not defined (profile)\n return\n if value in self._possible_values:\n return\n if value is not None and \"ANY\" in self._possible_values:\n return\n msg = (\"'%s' is not a valid 'options.%s' value.\\nPossible values are %s\"\n % (value, self._name, self._possible_values))\n raise ConanException(msg)",
"def _listContains(self, l, entry):\n for i in range(0, len(l)):\n if l[i] == entry:\n return True\n return False",
"def is_in(self, e):\n return e in self.vals",
"def _inlist(self, source, target):\n source = \"\" if not source else source\n target = \"\" if not target else target\n source_list = source.split(\",\")\n target_list = target.split(\",\")\n return 1 if len([value for value in source_list if value in target_list]) > 0 else 0",
"def _check_list(self, input_list, switch_list):\n\n return_list = []\n for vid in input_list:\n if str(vid) in switch_list:\n return_list.append(vid)\n return return_list",
"def _check_settings_validity(self, settings: list):\n\n if isinstance(settings, list):\n # if list is empty\n if not settings:\n raise ValueError('The given settings are an empty list, please make sure to add a dictionary with a key \\'CLF_NAME\\' and a corresponding classfier name as value. You can specify hyperparameters for the classifier with the key \\'HYPERPARAMS\\'.')\n \n # if not all entries in the list are of type dict raise an error\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in settings are expected to be of type \\'dict\\'.')\n\n for setting in settings:\n # if there is no CLF_NAME key in the dict of the setting entry raise an error\n if 'CLF_NAME' not in setting.keys():\n raise KeyError(f'Every entry in settings is required to have a \\'CLF_NAME\\' key, please make sure that this key exists in every entry in settings.')\n \n # get the classifier and its corresponding parameters\n classifier = self._get_classifier_to_name(setting['CLF_NAME'])\n\n # check if the classifier also has a predict_proba() function\n if not(hasattr(classifier,'predict_proba') and callable(getattr(classifier,'predict_proba'))):\n raise ValueError('')\n \n clf_params_keys = classifier.get_params().keys()\n\n # check if hyperparameters are given as list or as dict\n if 'HYPERPARAMS' in setting.keys():\n hyperparams = setting['HYPERPARAMS']\n\n # if given as list, all elements in the list must be of type dict\n if isinstance(hyperparams, list):\n # if hyperparameter list is empty\n if not hyperparams:\n raise ValueError('The given hyperparameters are an empty list, please make sure to add hyperparameters as \\'dict\\' where a key represents the parameter name and the value is the parameter value/values wrapped in a list.')\n\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in the settings hyperparameters are expected to be of type \\'dict\\'.')\n \n # loop through the dicts in HYPERPARAMS\n for hyperparams_entry in hyperparams:\n # for each dict check if the keys are valid paramters of the corresponding classifier\n for hyperparams_entry_key in hyperparams_entry.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_entry_value = hyperparams_entry[hyperparams_entry_key]\n \n if not isinstance(hyperparams_entry_value, list):\n raise TypeError(f'The hyperparameter {hyperparams_entry_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_entry_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n\n # if the parameter value list is empty\n if not hyperparams_entry_value:\n raise ValueError(f'Valuelist for hyperparameter {hyperparams_entry_key} is empty. 
Please specify values for the hyperparameter {hyperparams_entry_key} or remove it from HYPERPARAMS.')\n\n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparams_entry_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparams_entry_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n \n # if given as dict just check if the keys are valid paramters of the corresponding classifier\n elif isinstance(hyperparams, dict):\n for hyperparam_key in hyperparams.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_value = hyperparams[hyperparam_key]\n\n if not isinstance(hyperparams_value, list):\n raise TypeError(f'The hyperparameter {hyperparam_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n \n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparam_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparam_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n\n else:\n raise TypeError(f'Hyperparameters in settings must be either of type \\'dict\\' or \\'list\\', got type \\'{type(hyperparams).__name__}\\'')\n\n else:\n raise TypeError(f'Settings must be of type \\'list\\', passed settings are of type \\'{type(settings).__name__}\\'')"
] | [
"0.719088",
"0.62734437",
"0.6226575",
"0.587397",
"0.58457685",
"0.57075506",
"0.56937474",
"0.5466414",
"0.5451235",
"0.5410411",
"0.54094964",
"0.54041463",
"0.5403871",
"0.53722614",
"0.5351291",
"0.5345841",
"0.5297422",
"0.5291149",
"0.52697456",
"0.5263931",
"0.52288085",
"0.5228287",
"0.5208821",
"0.51891124",
"0.5179075",
"0.51689357",
"0.5166085",
"0.5130272",
"0.5128933",
"0.5112151"
] | 0.87735647 | 0 |
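The _check_settings_validity snippet quoted in the negatives above expects a very specific input shape. A minimal sketch of that shape follows; the classifier names and hyperparameter keys are assumptions chosen for illustration, not values taken from that snippet.

# Hypothetical settings list in the shape _check_settings_validity accepts:
# every entry is a dict with a 'CLF_NAME' key, and every hyperparameter value
# is wrapped in a list (the scikit-learn classifier names are assumptions).
example_settings = [
    {"CLF_NAME": "RandomForestClassifier",
     "HYPERPARAMS": {"n_estimators": [100, 200], "max_depth": [5, 10]}},
    {"CLF_NAME": "LogisticRegression"},  # HYPERPARAMS is optional
]

# The same structural rules the snippet enforces, restated as assertions.
for entry in example_settings:
    assert isinstance(entry, dict) and "CLF_NAME" in entry
    for values in entry.get("HYPERPARAMS", {}).values():
        assert isinstance(values, list) and values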
raise InvalidSettingError if dict |d| is not in dict |valid_d|, where the \"in\" semantics follow Settings._is_in_dict(); see that method's documentation for details. | def _dict_validity_check(d, valid_d):
if not Settings._is_in_dict(d, valid_d):
raise InvalidSettingError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n if not Settings._is_in_list(v, valid_d[k]):\n return False\n elif Settings._is_dict(v):\n if isinstance(valid_d[k], dict):\n if not Settings._is_in_dict(v, valid_d[k]):\n return False\n elif isinstance(valid_d[k], list):\n if not Settings._is_dict_in_one_of_dicts(v, valid_d[k]):\n return False\n else:\n raise InvalidSettingError()\n else:\n raise InvalidSettingError()\n return Settings._has_all_keys_from(d, valid_d)",
"def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)",
"def _check_keys(setting_dict):\n for key in SettingContainer.key_list:\n if not key in setting_dict:\n raise Exception(\n f\"No value for {key} found in language-settings\")",
"def _is_dict_in_one_of_dicts(d, dicts):\n\n for vd in dicts:\n if Settings._is_in_dict(d, vd):\n return True\n return False",
"def validate_settings(self, settings):\n pass",
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def check_settings_syntax(settings_dict: dict, settings_metadata_dict: dict):\n try:\n f_root(**settings_dict)\n except ValidationError as e:\n msg = ''\n for error in e.errors():\n loc = error['loc']\n error_msg = \"Validation error for setting {}, bad value: {} (value origin: {})\\n\".format(\n '->'.join(str(x) for x in loc),\n get_pydantic_error_value(settings_dict, loc),\n settings_metadata_dict[loc[0]]\n )\n error_msg += \"Message: {}\\n\".format(error['msg'])\n msg += error_msg\n logger.error(msg)\n raise SettingsSyntaxError(msg)",
"def validate_settings(_cfg, _ctx):\n pass",
"def _has_all_keys_from(d, valid_d):\n\n for k, v in valid_d.items():\n if k not in d:\n return False\n return True",
"def test_RestrictingNodeTransformer__visit_In_Dict():\n assert restricted_eval('2 in {1: 1, 2: 2, 3: 3}') is True",
"def valid_cfg(cfg):\n\t\tif not isinstance(cfg, dict):\n\t\t\traise TypeError('Config should be a python dictionary')\n\t\treturn cfg",
"def test_check_required_success():\n settings = SettingsModel()\n # Tamper required settings\n settings._required_settings = (\"FOO\", \"PLOP\")\n\n settings.load_from_kwargs(\n FOO=True,\n BAR=True,\n check=False,\n defaults=False,\n )\n\n with pytest.raises(InvalidSettings):\n settings.check()\n\n settings.load_from_kwargs(PLOP=True, check=False, defaults=False)\n\n settings.check()",
"def _check_allowed_values(self, key: str, value: Any):\n allowedValues = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedValues\", None)\n if allowedValues is not None and value not in allowedValues:\n raise Exception(\n f\"Value '{value}' is not an allowed value for '{key}'. Allowed values are: {', '.join(allowedValues)}\"\n )",
"def check_parameter_existence(self, d: dict, params: list):\n for param_name in params:\n if param_name not in d:\n raise Exception('Expecting the parameter \"' + param_name\n + '\" but cannot find it.')",
"def settings_validate(ctx):\n path = ctx.obj['load_path']\n if not path:\n _raise_settings_not_found()\n with open(path) as handle:\n config_dict = json.load(handle)\n try:\n config.validate_config(config_dict)\n except exceptions.ConfigValidationError as err:\n raise click.ClickException(\n '{} is invalid: '.format(path) + err.message\n ) from err",
"def validate_and_transfer_matching_settings(self, origin_settings, destination_settings):\n for name, dest_value in destination_settings.items():\n if origin_settings.Has(name): # Validate and transfer value.\n orig_value = origin_settings[name]\n if dest_value.IsDouble() and orig_value.IsDouble():\n destination_settings[name].SetDouble(origin_settings[name].GetDouble())\n elif dest_value.IsInt() and orig_value.IsInt():\n destination_settings[name].SetInt(origin_settings[name].GetInt())\n elif dest_value.IsBool() and orig_value.IsBool():\n destination_settings[name].SetBool(origin_settings[name].GetBool())\n elif dest_value.IsString() and orig_value.IsString():\n destination_settings[name].SetString(origin_settings[name].GetString())\n elif dest_value.IsArray() and orig_value.IsArray():\n if dest_value.size() != orig_value.size():\n raise Exception('len(\"' + name + '\") != ' + str(dest_value.size()))\n for i in range(dest_value.size()):\n if dest_value[i].IsDouble() and orig_value[i].IsDouble():\n dest_value[i].SetDouble(orig_value[i].GetDouble())\n elif dest_value[i].IsInt() and orig_value[i].IsInt():\n dest_value[i].SetInt(orig_value[i].GetInt())\n elif dest_value[i].IsBool() and orig_value[i].IsBool():\n dest_value[i].SetBool(orig_value[i].GetBool())\n elif dest_value[i].IsString() and orig_value[i].IsString():\n dest_value[i].SetString(orig_value[i].GetString())\n elif dest_value[i].IsSubParameter() and orig_value[i].IsSubParameter():\n self.validate_and_transfer_matching_settings(orig_value[i], dest_value[i])\n if len(orig_value[i].items()) != 0:\n raise Exception('Json settings not found in default settings: ' + orig_value[i].PrettyPrintJsonString())\n else:\n raise Exception('Unsupported parameter type.')\n elif dest_value.IsSubParameter() and orig_value.IsSubParameter():\n self.validate_and_transfer_matching_settings(orig_value, dest_value)\n if len(orig_value.items()) != 0:\n raise Exception('Json settings not found in default settings: ' + orig_value.PrettyPrintJsonString())\n else:\n raise Exception('Unsupported parameter type.')\n origin_settings.RemoveValue(name)",
"def validate_dict_contains_value(dictionary, dict_name, value, yaml_file):\n\n if value not in dictionary:\n raise ClowderYAMLError(fmt.missing_entry_error(value, dict_name, yaml_file))",
"def validate_dict(in_dict, **kwargs):\n\n if not isinstance(in_dict, dict):\n raise ValueError('requires a dictionary')\n\n for key, value in iteritems(kwargs):\n\n if key == 'required':\n for required_key in value:\n if required_key not in in_dict:\n return False\n\n elif key not in in_dict:\n continue\n\n elif value == bool:\n\n in_dict[key] = (True\n if str(in_dict[key]).lower() == 'true'\n else False)\n\n else:\n\n if (isinstance(in_dict[key], list) and\n len(in_dict[key]) == 1 and\n value != list):\n in_dict[key] = in_dict[key][0]\n\n try:\n if key in in_dict:\n in_dict[key] = value(in_dict[key])\n except ValueError:\n return False\n\n return True",
"def _check_valid_value(self, value):\n if self._possible_values is None: # validation not defined (profile)\n return\n if value in self._possible_values:\n return\n if value is not None and \"ANY\" in self._possible_values:\n return\n msg = (\"'%s' is not a valid 'options.%s' value.\\nPossible values are %s\"\n % (value, self._name, self._possible_values))\n raise ConanException(msg)",
"def dictionary_shoule_contain_value(self,dictionary,value,msg=None):\r\n default = \"Dictionary does not contain value '%s'\"%value\r\n _verify_condition(value in dictionary.values(),default,msg)",
"def check_invalid_items(**kwargs: Tuple[T, Iterable[T]]):\n for key, (value, possible) in kwargs.items():\n possible = set(possible)\n if value not in possible:\n raise ValueError(f\"{key}={value} is not in: {possible}\")",
"def _check_if_in_config(config, *keys):\n for key in keys:\n if key not in config:\n raise ValueError(f\"Config must contain key '{key}\")",
"def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_valid = self.valid_values\n if value not in current_valid:\n raise self.not_in_value_error(self.not_in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_valid)))",
"def _primitive_validity_check(v, valid_v):\n\n if not Settings._is_in_prim(v, valid_v):\n raise InvalidSettingError()",
"def validate(dic, option_list):\n\tfor key in dic.viewkeys():\n\t\tif key in option_list:\n\t\t\tfor option in option_list:\n\t\t\t\tif option != key:\n\t\t\t\t\tif dic[option] and dic[key]:\n\t\t\t\t\t\traise click.UsageError('Invalid option combination --%s \\\n\t\t\t\t\t\t\tcannot be used with --%s' % (option, key))\n\n\treturn True",
"def check_valid_keys(self, obj):\n invalid_keys = [\n x for x in obj if x not in self.defaults\n ]\n if invalid_keys:\n raise ValueError(\n \"No such model parameters: %s. Valid parameters are: %s\"\n % (\" \".join(invalid_keys), \" \".join(self.defaults)))",
"def test_permlookupdict_in(self):\n pldict = PermLookupDict(MockUser(), \"mockapp\")\n with self.assertRaises(TypeError):\n self.EQLimiterObject() in pldict",
"def test_process_dict_false(self):\n\n self.assertNotIn('userB@domain', self.temp_set)",
"def __contains__(self, item):\n return self.settings.has(item)",
"def _validate_input_dict(self, input):\n if isinstance(input, dict):\n required = {\"type\", \"value\"}\n not_found = required - set(input.keys())\n if not_found:\n raise SpecificationError(\n \"Required key(s) not found in input dictionary: {}\".format(\n \", \".join(not_found)\n )\n )\n else:\n raise Exception(\"input element has to be a dictionary\")"
] | [
"0.7488277",
"0.6641955",
"0.6196617",
"0.59063935",
"0.5877175",
"0.57327497",
"0.57189006",
"0.5654708",
"0.5553554",
"0.53341234",
"0.5283865",
"0.5279843",
"0.5265277",
"0.5223105",
"0.52217144",
"0.5210006",
"0.5198275",
"0.51907164",
"0.5188572",
"0.51837295",
"0.51781255",
"0.5161453",
"0.5149008",
"0.5141537",
"0.5139074",
"0.5137223",
"0.51302856",
"0.5129849",
"0.51258403",
"0.5124888"
] | 0.86825794 | 0 |
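A minimal, self-contained sketch of the containment check that _dict_validity_check delegates to. The real Settings._is_in_dict (quoted first in the negatives above) also recurses into nested lists and dicts and requires every valid key to be present; this simplified version only covers flat dicts of primitive values.

class InvalidSettingError(Exception):
    pass

def is_in_dict(d, valid_d):
    # every key must be known, and every value must be among its allowed values
    return all(k in valid_d and v in valid_d[k] for k, v in d.items())

def dict_validity_check(d, valid_d):
    if not is_in_dict(d, valid_d):
        raise InvalidSettingError()

valid = {"mode": ["fast", "slow"], "retries": [0, 1, 2]}
dict_validity_check({"mode": "fast", "retries": 1}, valid)       # passes silently
try:
    dict_validity_check({"mode": "turbo", "retries": 1}, valid)  # unknown value
except InvalidSettingError:
    print("rejected invalid setting")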
error-check |settings| against |valid|. Both are dicts. |settings| represents the user settings, where each pair maps a setting name to the chosen setting value. |valid| represents all valid user settings, where each pair maps a setting name to its legal setting values. | def _validity_check(settings, valid):
Settings._dict_validity_check(settings, valid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_settings(self, settings):\n pass",
"def validate_settings(_cfg, _ctx):\n pass",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def check_settings_syntax(settings_dict: dict, settings_metadata_dict: dict):\n try:\n f_root(**settings_dict)\n except ValidationError as e:\n msg = ''\n for error in e.errors():\n loc = error['loc']\n error_msg = \"Validation error for setting {}, bad value: {} (value origin: {})\\n\".format(\n '->'.join(str(x) for x in loc),\n get_pydantic_error_value(settings_dict, loc),\n settings_metadata_dict[loc[0]]\n )\n error_msg += \"Message: {}\\n\".format(error['msg'])\n msg += error_msg\n logger.error(msg)\n raise SettingsSyntaxError(msg)",
"def check_settings(app_configs, **kwargs):\n messages = []\n for name, setting in Setting.settings.items():\n try:\n setting.validate()\n except ValidationError as exc:\n msg = \"Error validating setting with value %s: %s\" % (setting.value, exc)\n messages.append(Error(msg, obj=name, id='settings.E001'))\n return messages",
"def verify_settings(settings):\r\n\r\n if 'limit' not in settings:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit' option must be defined!\")\r\n return False\r\n\r\n if type(settings['limit']) != int:\r\n iridium_manager_tracer.warning(\"Settings: 'limit' must be an int!\")\r\n return False\r\n\r\n if 'limit_interval' not in settings:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' option must be defined!\")\r\n return False\r\n\r\n if type(settings['limit_interval']) != str:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' must be an str!\")\r\n return False\r\n\r\n # Force limit interval setting to always be lower case\r\n settings['limit_interval'] = settings['limit_interval'].lower()\r\n\r\n values = ''\r\n for item in TimeIntervals:\r\n if settings['limit_interval'] == item['name']:\r\n break\r\n values += item['name'] + ', '\r\n else:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' must be one of the following: %s\", values)\r\n return False\r\n\r\n return True",
"def check_settings(self):\n pass",
"def check_settings(self):\r\n pass",
"def _check_settings_validity(self, settings: list):\n\n if isinstance(settings, list):\n # if list is empty\n if not settings:\n raise ValueError('The given settings are an empty list, please make sure to add a dictionary with a key \\'CLF_NAME\\' and a corresponding classfier name as value. You can specify hyperparameters for the classifier with the key \\'HYPERPARAMS\\'.')\n \n # if not all entries in the list are of type dict raise an error\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in settings are expected to be of type \\'dict\\'.')\n\n for setting in settings:\n # if there is no CLF_NAME key in the dict of the setting entry raise an error\n if 'CLF_NAME' not in setting.keys():\n raise KeyError(f'Every entry in settings is required to have a \\'CLF_NAME\\' key, please make sure that this key exists in every entry in settings.')\n \n # get the classifier and its corresponding parameters\n classifier = self._get_classifier_to_name(setting['CLF_NAME'])\n\n # check if the classifier also has a predict_proba() function\n if not(hasattr(classifier,'predict_proba') and callable(getattr(classifier,'predict_proba'))):\n raise ValueError('')\n \n clf_params_keys = classifier.get_params().keys()\n\n # check if hyperparameters are given as list or as dict\n if 'HYPERPARAMS' in setting.keys():\n hyperparams = setting['HYPERPARAMS']\n\n # if given as list, all elements in the list must be of type dict\n if isinstance(hyperparams, list):\n # if hyperparameter list is empty\n if not hyperparams:\n raise ValueError('The given hyperparameters are an empty list, please make sure to add hyperparameters as \\'dict\\' where a key represents the parameter name and the value is the parameter value/values wrapped in a list.')\n\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in the settings hyperparameters are expected to be of type \\'dict\\'.')\n \n # loop through the dicts in HYPERPARAMS\n for hyperparams_entry in hyperparams:\n # for each dict check if the keys are valid paramters of the corresponding classifier\n for hyperparams_entry_key in hyperparams_entry.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_entry_value = hyperparams_entry[hyperparams_entry_key]\n \n if not isinstance(hyperparams_entry_value, list):\n raise TypeError(f'The hyperparameter {hyperparams_entry_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_entry_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n\n # if the parameter value list is empty\n if not hyperparams_entry_value:\n raise ValueError(f'Valuelist for hyperparameter {hyperparams_entry_key} is empty. 
Please specify values for the hyperparameter {hyperparams_entry_key} or remove it from HYPERPARAMS.')\n\n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparams_entry_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparams_entry_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n \n # if given as dict just check if the keys are valid paramters of the corresponding classifier\n elif isinstance(hyperparams, dict):\n for hyperparam_key in hyperparams.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_value = hyperparams[hyperparam_key]\n\n if not isinstance(hyperparams_value, list):\n raise TypeError(f'The hyperparameter {hyperparam_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n \n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparam_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparam_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n\n else:\n raise TypeError(f'Hyperparameters in settings must be either of type \\'dict\\' or \\'list\\', got type \\'{type(hyperparams).__name__}\\'')\n\n else:\n raise TypeError(f'Settings must be of type \\'list\\', passed settings are of type \\'{type(settings).__name__}\\'')",
"def _check_keys(setting_dict):\n for key in SettingContainer.key_list:\n if not key in setting_dict:\n raise Exception(\n f\"No value for {key} found in language-settings\")",
"def test_check_required_success():\n settings = SettingsModel()\n # Tamper required settings\n settings._required_settings = (\"FOO\", \"PLOP\")\n\n settings.load_from_kwargs(\n FOO=True,\n BAR=True,\n check=False,\n defaults=False,\n )\n\n with pytest.raises(InvalidSettings):\n settings.check()\n\n settings.load_from_kwargs(PLOP=True, check=False, defaults=False)\n\n settings.check()",
"def test_check_required_fail():\n settings = SettingsModel()\n\n with pytest.raises(InvalidSettings):\n settings.check()",
"def _validate_settings(settings):\n invalid_opts = set(settings.keys()).difference(_SESS_OPTS)\n if invalid_opts:\n invalid_opts_list = \"', '\".join(invalid_opts)\n raise InterfaceError(f\"Invalid option(s): '{invalid_opts_list}'\")\n\n if \"routers\" in settings:\n for router in settings[\"routers\"]:\n _validate_hosts(router, 33060)\n elif \"host\" in settings:\n _validate_hosts(settings)\n\n if \"ssl-mode\" in settings:\n ssl_mode = settings[\"ssl-mode\"]\n try:\n settings[\"ssl-mode\"] = SSLMode(\n ssl_mode.lower().strip() if isinstance(ssl_mode, str) else ssl_mode\n )\n except (AttributeError, ValueError) as err:\n raise InterfaceError(f\"Invalid SSL Mode '{settings['ssl-mode']}'\") from err\n if \"ssl-ca\" not in settings and settings[\"ssl-mode\"] in [\n SSLMode.VERIFY_IDENTITY,\n SSLMode.VERIFY_CA,\n ]:\n raise InterfaceError(\"Cannot verify Server without CA\")\n\n if \"ssl-crl\" in settings and \"ssl-ca\" not in settings:\n raise InterfaceError(\"CA Certificate not provided\")\n\n if \"ssl-key\" in settings and \"ssl-cert\" not in settings:\n raise InterfaceError(\"Client Certificate not provided\")\n\n if \"ssl-ca\" in settings and settings.get(\"ssl-mode\") not in [\n SSLMode.VERIFY_IDENTITY,\n SSLMode.VERIFY_CA,\n SSLMode.DISABLED,\n ]:\n raise InterfaceError(\"Must verify Server if CA is provided\")\n\n if \"auth\" in settings:\n auth = settings[\"auth\"]\n try:\n settings[\"auth\"] = Auth(\n auth.lower().strip() if isinstance(auth, str) else auth\n )\n except (AttributeError, ValueError) as err:\n raise InterfaceError(f\"Invalid Auth '{settings['auth']}'\") from err\n\n if \"compression\" in settings:\n compression = settings[\"compression\"]\n try:\n settings[\"compression\"] = Compression(\n compression.lower().strip()\n if isinstance(compression, str)\n else compression\n )\n except (AttributeError, ValueError) as err:\n raise InterfaceError(\n \"The connection property 'compression' acceptable values are: \"\n \"'preferred', 'required', or 'disabled'. 
The value \"\n f\"'{settings['compression']}' is not acceptable\"\n ) from err\n\n if \"compression-algorithms\" in settings:\n if isinstance(settings[\"compression-algorithms\"], str):\n compression_algorithms = (\n settings[\"compression-algorithms\"].strip().strip(\"[]\")\n )\n if compression_algorithms:\n settings[\"compression-algorithms\"] = compression_algorithms.split(\",\")\n else:\n settings[\"compression-algorithms\"] = None\n elif not isinstance(settings[\"compression-algorithms\"], (list, tuple)):\n raise InterfaceError(\n \"Invalid type of the connection property 'compression-algorithms'\"\n )\n if settings.get(\"compression\") == Compression.DISABLED:\n settings[\"compression-algorithms\"] = None\n\n if \"connection-attributes\" in settings:\n _validate_connection_attributes(settings)\n\n if \"connect-timeout\" in settings:\n try:\n if isinstance(settings[\"connect-timeout\"], str):\n settings[\"connect-timeout\"] = int(settings[\"connect-timeout\"])\n if (\n not isinstance(settings[\"connect-timeout\"], int)\n or settings[\"connect-timeout\"] < 0\n ):\n raise ValueError\n except ValueError:\n raise TypeError(\n \"The connection timeout value must be a positive \"\n \"integer (including 0)\"\n ) from None\n\n if \"dns-srv\" in settings:\n if not isinstance(settings[\"dns-srv\"], bool):\n raise InterfaceError(\"The value of 'dns-srv' must be a boolean\")\n if settings.get(\"socket\"):\n raise InterfaceError(\n \"Using Unix domain sockets with DNS SRV lookup is not allowed\"\n )\n if settings.get(\"port\"):\n raise InterfaceError(\n \"Specifying a port number with DNS SRV lookup is not allowed\"\n )\n if settings.get(\"routers\"):\n raise InterfaceError(\n \"Specifying multiple hostnames with DNS SRV look up is not allowed\"\n )\n elif \"host\" in settings and not settings.get(\"port\"):\n settings[\"port\"] = 33060\n\n if \"tls-versions\" in settings:\n _validate_tls_versions(settings)\n\n if \"tls-ciphersuites\" in settings:\n _validate_tls_ciphersuites(settings)",
"def process_settings(self, settings):\n default_settings = self.default_settings()\n\n processed_settings = {}\n\n for key, value in default_settings.items():\n if key in settings:\n processed_settings[key] = settings[key]\n else:\n processed_settings[key] = value\n\n self.validate_settings(processed_settings)\n\n return processed_settings",
"def validate_settings(self):\n\t\t# Check all attributes exist\n\t\tfor key, value in vars(self).items():\n\t\t if hasattr(self, key) == False:\n\t\t\t\tUtility.report_error(1, '%s: Missing attribute \"%s\"' % (self._file_path, key))\n\n\t\t# Check mandatory attributes\n\t\tif self.is_valid_status(self.status) == False:\n\t\t\tUtility.report_error(1, '%s: Status \"%s\" is not valid' % (self._file_path, self.status))\n\n\t\tif self.definition == '' or self.definition == None:\n\t\t\tUtility.report_error(1, '%s: Definition field is empty or missing' % (self._file_path))\n\t\t\n\t\tif self.term == '' or self.term == None:\n\t\t\tUtility.report_error(1, '%s: Term field is empty or missing' % (self._file_path))\n\n\t\t# If status is neither approved or elaboration reject reason must be stated\n\t\tif (self.status == 'rejected' or self.status == 'replaced') and (self.status_reason == '' or self.status_reason == None):\n\t\t\tUtility.report_error(1, '%s: \"Status reason\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is rejected a rejected by user must be specified\n\t\tif self.status == 'rejected' and (self.rejected_by == '' or self.rejected_by == None):\n\t\t\tUtility.report_error(1, '%s: \"Rejected by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is replaced then Replaced by must be specified\n\t\tif self.status == 'replaced' and (self.replaced_by == None or self.replaced == ''):\n\t\t\tUtility.report_error(1, '%s: \"Replaced by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\tself.created_by = self.make_link_list('stakeholders', 'Created by', self.created_by, False)\n\t\tself.rejected_by = self.make_link_list('stakeholders', 'Rejected by', self.rejected_by, False)\n\t\tself.replaced_by = self.make_link_list('glossary', 'Replaced by', self.replaced_by)\n\n\t\tif self.is_string_date(self.created_on) == False:\n\t\t\tUtility.report_error(1, '%s: Created on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.created_on))\n\n\t\tif self.is_string_date(self.rejected_on) == False:\n\t\t\tUtility.report_error(1, '%s: Rejected on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.rejected_on))",
"def validate_and_transfer_matching_settings(self, origin_settings, destination_settings):\n for name, dest_value in destination_settings.items():\n if origin_settings.Has(name): # Validate and transfer value.\n orig_value = origin_settings[name]\n if dest_value.IsDouble() and orig_value.IsDouble():\n destination_settings[name].SetDouble(origin_settings[name].GetDouble())\n elif dest_value.IsInt() and orig_value.IsInt():\n destination_settings[name].SetInt(origin_settings[name].GetInt())\n elif dest_value.IsBool() and orig_value.IsBool():\n destination_settings[name].SetBool(origin_settings[name].GetBool())\n elif dest_value.IsString() and orig_value.IsString():\n destination_settings[name].SetString(origin_settings[name].GetString())\n elif dest_value.IsArray() and orig_value.IsArray():\n if dest_value.size() != orig_value.size():\n raise Exception('len(\"' + name + '\") != ' + str(dest_value.size()))\n for i in range(dest_value.size()):\n if dest_value[i].IsDouble() and orig_value[i].IsDouble():\n dest_value[i].SetDouble(orig_value[i].GetDouble())\n elif dest_value[i].IsInt() and orig_value[i].IsInt():\n dest_value[i].SetInt(orig_value[i].GetInt())\n elif dest_value[i].IsBool() and orig_value[i].IsBool():\n dest_value[i].SetBool(orig_value[i].GetBool())\n elif dest_value[i].IsString() and orig_value[i].IsString():\n dest_value[i].SetString(orig_value[i].GetString())\n elif dest_value[i].IsSubParameter() and orig_value[i].IsSubParameter():\n self.validate_and_transfer_matching_settings(orig_value[i], dest_value[i])\n if len(orig_value[i].items()) != 0:\n raise Exception('Json settings not found in default settings: ' + orig_value[i].PrettyPrintJsonString())\n else:\n raise Exception('Unsupported parameter type.')\n elif dest_value.IsSubParameter() and orig_value.IsSubParameter():\n self.validate_and_transfer_matching_settings(orig_value, dest_value)\n if len(orig_value.items()) != 0:\n raise Exception('Json settings not found in default settings: ' + orig_value.PrettyPrintJsonString())\n else:\n raise Exception('Unsupported parameter type.')\n origin_settings.RemoveValue(name)",
"def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False): \n # Below is a list of the required fields. The entries in this list will be removed as they\n # are observed. An empty list at the end of the config check indicates that all necessary\n # fields where provided.\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n \n if stanza is not None and confInfo is not None:\n # Add each of the settings\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n \n # Key is eai; Set meta \n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n \n # Key is eai; userName/appName\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n \n # Key is not proper\n else:\n pass\n \n # Check each of the settings individually\n logger.info(\"Checking general settings for the '%s' suppression\", stanza)\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n # Check the disabled/selected value\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n \n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n except ValueError:\n raise InvalidParameterValueException(key, val, \"must be a valid boolean\")\n \n elif key in Suppressions.REQUIRED_PARAMS:\n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n elif key in Suppressions.VALID_PARAMS:\n pass\n \n # Key is eai\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n \n # Key is not proper\n else:\n if throwExceptionOnError:\n raise UnsupportedParameterException()\n \n else:\n logger.warn(\"The configuration for '%s' contains an unsupported parameter: %s\", stanza, key)\n\n # Error if some of the required fields were not provided\n if len(required_fields) > 0:\n raise InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())",
"def settings_validate(ctx):\n path = ctx.obj['load_path']\n if not path:\n _raise_settings_not_found()\n with open(path) as handle:\n config_dict = json.load(handle)\n try:\n config.validate_config(config_dict)\n except exceptions.ConfigValidationError as err:\n raise click.ClickException(\n '{} is invalid: '.format(path) + err.message\n ) from err",
"def checkSettings(self):\n client.checkSettings(self)\n # TODO: Check your settings. Example:\n #\n # if self.postFixParams and len(self.postFixParams) > self.protocolVersion:\n # raise Exception( \"You really don't know how this client works, do you? ... Do I, actually?\" )",
"def checkConf(settings, stanza=None, confInfo=None, onlyCheckProvidedFields=False, existing_settings=None):\r\n\r\n # Add all of the configuration items to the confInfo object so that the REST endpoint lists them (even if they are wrong)\r\n # We want them all to be listed so that the users can see what the current value is (and hopefully will notice that it is wrong)\r\n for key, val in settings.items():\r\n \r\n # Add the value to the configuration info\r\n if stanza is not None and confInfo is not None:\r\n \r\n # Handle the EAI:ACLs differently than the normal values\r\n if key == 'eai:acl':\r\n confInfo[stanza].setMetadata(key, val)\r\n elif key in RadiusAuthRestHandler.VALID_PARAMS and key not in RadiusAuthRestHandler.UNSAVED_PARAMS:\r\n confInfo[stanza].append(key, val)\r\n\r\n # Below is a list of the required fields. The entries in this list will be removed as they\r\n # are observed. An empty list at the end of the config check indicates that all necessary\r\n # fields where provided.\r\n required_fields = RadiusAuthRestHandler.REQUIRED_PARAMS[:]\r\n \r\n # Check each of the settings\r\n for key, val in settings.items():\r\n \r\n # Remove the field from the list of required fields\r\n try:\r\n required_fields.remove(key)\r\n except ValueError:\r\n pass # Field not available, probably because it is not required\r\n \r\n # Stop if not all of the required parameters are not provided\r\n if onlyCheckProvidedFields == False and len(required_fields) > 0: #stanza != \"default\" and \r\n raise admin.ArgValidationException(\"The following fields must be defined in the configuration but were not: \" + \",\".join(required_fields) )\r\n \r\n # Clean up and validate the parameters\r\n cleaned_params = RadiusAuthRestHandler.convertParams(stanza, settings, False)\r\n \r\n # Run the general validators\r\n for validator in RadiusAuthRestHandler.GENERAL_VALIDATORS:\r\n validator.validate( stanza, cleaned_params, existing_settings )\r\n \r\n # Remove the parameters that are not intended to be saved\r\n for to_remove in RadiusAuthRestHandler.UNSAVED_PARAMS:\r\n if to_remove in cleaned_params:\r\n del cleaned_params[to_remove]\r\n \r\n # Return the cleaned parameters \r\n return cleaned_params",
"def __init__(self, settings, valid, defaults=None):\n\n try:\n with open(settings, 'r') as settings_file:\n self._settings = json.load(settings_file)\n except TypeError:\n self._settings = dict(settings)\n self._settings = Settings._inject_defaults(self._settings, defaults)\n Settings._validity_check(self._settings, valid)",
"def check_settings(*settings):\n def _decor(fn):\n def _fn(*args, **kwargs):\n for setting in settings:\n if not get_setting(setting):\n return\n return fn(*args, **kwargs)\n return _fn\n return _decor",
"def ensure_settings_are_valid(settings: Settings):\n if not _get_control(settings):\n raise Exception(\n '\\n'\n 'Proofdock Chaos Kit is not configured.')\n\n api_url = get_api_url(settings)\n if not api_url:\n raise Exception(\n '\\n'\n 'Proofdock Cloud URL is not set. '\n 'Please set it first by calling:\\n\\n'\n '$ chaos configure --default-api-url <API_URL>\\n'\n 'or set PROOFDOCK_API_URL environment variable.')\n\n if not get_api_token(settings, api_url):\n raise Exception(\n '\\n'\n 'Proofdock Cloud API Token is not set. '\n 'Please set it first by calling:\\n\\n'\n '$ chaos configure --token <API_TOKEN>\\n\\n'\n 'or set PROOFDOCK_API_TOKEN environment variable.')\n\n return True",
"def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)",
"def check_settings_existence(self):\n options = [\n 'AUTH_LDAP_SERVER_URI',\n 'AUTH_LDAP_USER_SEARCH_BASE',\n 'AUTH_LDAP_USER_USERNAME_ATTR',\n 'AUTH_LDAP_PROTOCOL_VERSION',\n 'AUTH_LDAP_BIND_DN',\n 'AUTH_LDAP_BIND_PASSWORD',\n ]\n for option in options:\n if not hasattr(settings, option):\n logger.error('LDAP::check_settings_existence\\tSetting %s is '\n 'not provided', option)\n sys.exit(1)",
"def test_missing_setting(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['settings'] = {}\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 3)\n self.assertEqual(results['warnings'][0]['name'], 'MISSING_SETTING')",
"def test_check_ess_settings(self):\n ess_settings1 = {'gaussian': [self.servers[0]], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': [self.servers[0]]}\n ess_settings2 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings3 = {'gaussian': self.servers[0], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': self.servers[0]}\n ess_settings4 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings5 = {'gaussian': 'local', 'molpro': self.servers[1], 'qchem': self.servers[0]}\n\n ess_settings1 = check_ess_settings(ess_settings1)\n ess_settings2 = check_ess_settings(ess_settings2)\n ess_settings3 = check_ess_settings(ess_settings3)\n ess_settings4 = check_ess_settings(ess_settings4)\n ess_settings5 = check_ess_settings(ess_settings5)\n\n ess_list = [ess_settings1, ess_settings2, ess_settings3, ess_settings4, ess_settings5]\n\n for ess in ess_list:\n for soft, server_list in ess.items():\n self.assertTrue(soft in ['gaussian', 'molpro', 'qchem'])\n self.assertIsInstance(server_list, list)\n\n with self.assertRaises(SettingsError):\n ess_settings6 = {'nosoft': ['server1']}\n check_ess_settings(ess_settings6)\n with self.assertRaises(SettingsError):\n ess_settings7 = {'gaussian': ['noserver']}\n check_ess_settings(ess_settings7)",
"def check_settings(self):\n if not self.app.config['SIMPLE_DOMAINS']:\n raise ConfigurationError('You must specify at least one SimpleDB domain to use.')\n\n if not (self.app.config['AWS_ACCESS_KEY_ID'] and self.app.config['AWS_SECRET_ACCESS_KEY']):\n raise ConfigurationError('You must specify your AWS credentials.')",
"def test_unknown_setting(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['settings'] = {\n 'VERSION': '1.0.0',\n 'DB_HOST': 'scale',\n 'DB_PASS': 'password',\n 'setting': 'extra'\n }\n\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'UNKNOWN_SETTING')",
"def check_payload_into_settings(self, cls, settings_name):\n payload, expected_triggers = self.create_payload_triggers()\n expected_settings = {'publish': {settings_name: expected_triggers}}\n settings = {}\n cls.payload_into_settings(payload, self.course, settings)\n self.assertItemsEqual(expected_settings, settings)\n\n # Absent from payload should remove from settings. Use settings dict\n # from above, since it will have contents to remove.\n cls.payload_into_settings({}, self.course, settings)\n empty_settings = {'publish': {}}\n self.assertEquals(empty_settings, settings)\n self.assertFalse(settings['publish'].get(settings_name))"
] | [
"0.76656973",
"0.68101305",
"0.6712639",
"0.6699112",
"0.6521201",
"0.6304268",
"0.62737554",
"0.62068",
"0.611062",
"0.5977708",
"0.595072",
"0.5796342",
"0.5790598",
"0.57904774",
"0.5786704",
"0.5760153",
"0.5697195",
"0.5671662",
"0.56589305",
"0.5599054",
"0.5580645",
"0.55398387",
"0.55392885",
"0.5529884",
"0.54761016",
"0.54696214",
"0.5446906",
"0.5436617",
"0.5435706",
"0.5421635"
] | 0.814493 | 0 |
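Extending the flat sketch above by one level of nesting shows how the top-level _validity_check can accept nested user settings: dict values recurse, primitive values are checked against a list of allowed values. This is still a simplification of the quoted _is_in_dict, which additionally handles lists and alternative dict schemas; the example keys are invented.

def is_in_dict(d, valid_d):
    # simplified recursive containment check
    for k, v in d.items():
        if k not in valid_d:
            return False
        if isinstance(v, dict):
            if not is_in_dict(v, valid_d[k]):
                return False
        elif v not in valid_d[k]:
            return False
    return True

valid = {"log": {"level": ["info", "debug"]}, "workers": [1, 2, 4]}
print(is_in_dict({"log": {"level": "debug"}, "workers": 2}, valid))  # True
print(is_in_dict({"log": {"level": "trace"}, "workers": 2}, valid))  # False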
inject any defaults specified in |defaults| into |settings|. Default values are only applied if a key exists in |defaults| but not in |settings|, or if a key in |settings| has an associated value of None. If |defaults| is None, |settings| is returned as is. | def _inject_defaults(settings, defaults):
new_settings = {}
if defaults is None:
return settings
elif settings is None or len(settings) == 0:
new_settings = defaults
else:
for k, v in settings.items():
if isinstance(v, dict) or v is None:
new_settings[k] = Settings._inject_defaults(v, defaults[k])
else:
new_settings[k] = settings[k]
for k, v in defaults.items():
if k not in settings:
new_settings[k] = defaults[k]
return new_settings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_defaults(self, settings_dict=None, **settings):\n settings_dict = settings_dict or {}\n settings_dict.update(settings)\n return self.set_values(settings_dict, override=False)",
"def loadDefaults(self,defaults):\n for key in defaults.keys():\n if key not in self.data:\n self.data[key] = defaults[key]",
"def set_defaults(fields, defaults):\n undefined = set(defaults.keys()) - set(fields.keys())\n for k in undefined:\n v = defaults[k]\n # see http://pyparsing.wikispaces.com/share/view/71042464\n fields[k] = v\n fields.append(v)",
"def twolevel_default_params(defaults):\n def wrap(function):\n def withargs(*args, **kwargs):\n merged = {}\n merged.update(defaults)\n for k, v in kwargs.items():\n if type(v) == dict and k in merged and type(merged[k]) == dict:\n merged[k].update(v)\n else:\n merged[k] = v\n return function(*args, **merged)\n\n return withargs\n\n return wrap",
"def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)",
"def replace_defaults(d):\n\n # remove the defaults section\n defaults = d.pop('.defaults')\n\n # look for default tags and replace them\n for k, v in defaults.items():\n recursive_search_replace(d, '!' + k + '!', v)",
"def add_defaults(cls, defaults):\n defaults.wallet = bittensor.Config()\n defaults.wallet.name = os.getenv('BT_WALLET_NAME') if os.getenv('BT_WALLET_NAME') != None else 'default'\n defaults.wallet.hotkey = os.getenv('BT_WALLET_HOTKEY') if os.getenv('BT_WALLET_HOTKEY') != None else 'default'\n defaults.wallet.path = os.getenv('BT_WALLET_PATH') if os.getenv('BT_WALLET_PATH') != None else '~/.bittensor/wallets/'",
"def save_defaults(self, overwrite=False):\r\n for (section, option), value in self.defaults.iteritems():\r\n if value is None:\r\n continue\r\n if section not in self.__config:\r\n self.__config[section] = {}\r\n if overwrite or option not in self.__config[section]:\r\n self.__config[section][option] = value\r\n self.save()",
"def add_default_configs(configs: dict, default_configs: dict):\n for key, value in default_configs.items():\n if key not in configs:\n configs[key] = value\n elif isinstance(default_configs[key], dict) and isinstance(configs[key], dict):\n add_default_configs(configs[key], default_configs[key])\n else:\n continue\n\n return configs",
"def set_defaults(cls, deco_classname, defaults: dict):\n # Change defaults of items in cls._classname2SettingsData_dict[deco_classname]\n deco_settings = cls._classname2SettingsData_dict[deco_classname]\n\n # Integrity check:\n # if setting_name is not a \"setting\" or it's not a \"visible\" setting for cls,\n # raise KeyError: that's what __getitem__/__setitem__ do\n for setting_name in defaults:\n if setting_name not in deco_settings:\n raise KeyError(\n \"set_defaults: no such setting (key) as '%s'\" % setting_name)\n elif not deco_settings[setting_name].visible:\n raise KeyError(\n \"set_defaults: setting (key) '%s' is not visible in class %s.\"\n % (setting_name, deco_classname))\n\n # TODO 'indirect' values -- Disallow? anyway, prevent? Somehow.\n # | Perhaps just get rid of any trailing INDIRECT_VALUE_MARKER ('=')\n\n # Change working default values\n for setting_name in defaults:\n deco_setting = deco_settings[setting_name]\n new_default_val = defaults[setting_name]\n\n if ((new_default_val or deco_setting.allow_falsy)\n and deco_setting.has_acceptable_type(new_default_val)\n ):\n # set working default value = new_default_val\n deco_setting.default = new_default_val",
"def update_with_defaults(**kwargs):\n # Update the defaults with the input values\n with open(DEFAULTS, \"r\") as f:\n defaults = json.load(f)\n return _update(kwargs, defaults)",
"def default_params(defaults):\n def wrap(function):\n def withargs(*args, **kwargs):\n merged = {}\n merged.update(defaults)\n merged.update(kwargs)\n return function(*args, **merged)\n return withargs\n return wrap",
"def load_from_defaults(self):\n default_settings = import_module('mindinsight.conf.defaults')\n for setting in dir(default_settings):\n if setting.isupper():\n setattr(self, setting, getattr(default_settings, setting))\n self._default_settings.add(setting)",
"def replaceDefaults(d):\n defaults = d.pop('.defaults')\n for k, v in defaults.items():\n recursiveSearchReplace(d, '!' + k + '!', v)",
"def defaulted_values(source_dict, defaults):\n return {\n k: v if v is not None else defaults[k] for k, v in source_dict.items()\n }.values()",
"def setup_settings():\n # pylint: disable=import-outside-toplevel\n from django.conf import settings\n import tiny_erp.settings as defaults\n\n for name in dir(defaults):\n if name.isupper() and not hasattr(settings, name):\n setattr(settings, name, getattr(defaults, name))",
"def _merge_settings(default_settings: JsonValue, user_settings: JsonValue, use_default_values: bool) -> JsonValue:\n if isinstance(default_settings, dict):\n user_settings = typing.cast(Json, user_settings)\n for key, default_value in default_settings.items():\n if key not in user_settings:\n user_settings[key] = _merge_settings(default_value, {}, use_default_values)\n elif isinstance(default_value, (dict, list)):\n user_settings[key] = _merge_settings(default_value, user_settings[key], use_default_values)\n elif not user_settings[key] and use_default_values:\n user_settings[key] = default_value\n return user_settings\n if isinstance(default_settings, list):\n # In this case, assume that there is only one item in default_settings\n user_settings = typing.cast(List[JsonValue], user_settings)\n if not user_settings:\n return default_settings\n updated_settings: List[JsonValue] = []\n for setting in user_settings:\n updated_settings.append(_merge_settings(default_settings[0], setting, use_default_values))\n return updated_settings\n return default_settings if use_default_values else ''",
"def load_config(defaults):\n defaults.update(DEFAULTS)\n\n config = configparser.ConfigParser()\n config.read(CONFIG_PATH)\n\n options = {}\n for section in defaults:\n section_options = {}\n\n if section not in config:\n for opt_name in defaults[section]:\n opt_default = defaults[section][opt_name]\n section_options[opt_name] = opt_default\n else:\n config_section = config[section]\n for opt_name, opt_default in defaults[section].items():\n section_options[opt_name] = config_section.get(\n opt_name, opt_default)\n\n options[section] = section_options\n\n return options",
"def _update_default_configs(\n default_configs: tuple[dict[Any, Any]], passed_configs: tuple[dict[Any, Any]]\n ):\n\n for default_config, passed_config in zip(default_configs, passed_configs):\n if passed_config is not None:\n update_dict_recursively(default_config, passed_config)",
"def defaults(self, **kwargs):\n for i in kwargs:\n self._.setdefault(i, kwargs[i])\n return self",
"def dict(self, *names, **defaults):\n for name, value in defaults.items():\n defaults[name] = self.get(name, value)\n if not defaults:\n defaults = dict(self.items())\n defaults.update(zip(names, map(self.getlist, names)))\n return defaults",
"def default_args(defaults):\r\n def wrapper(func):\r\n @wraps(func) # just to show docstring of original function\r\n def new_func(*args, **kwargs):\r\n kwargs = defaults | kwargs\r\n return func(*args, **kwargs)\r\n return new_func\r\n return wrapper",
"def add_default_settings_config(self):\n config = {\n mconst.DEF_SETTINGNAME_default_logfilename: mconst.DEF_SETTINGVAL_default_logfilename_defaultvalue,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)",
"def set_settings(self, **settings):\r\n for key in settings:\r\n if not key in self.DEFAULT_SETTINGS:\r\n raise ValueError(str(key) + \" is not a valid setting\")\r\n self.settings = {**self.settings, **settings}",
"def add_defaults(self, options):\n if 'option_defaults' in self.cscript:\n from_cscript = self.cscript['option_defaults']\n if isinstance(from_cscript, dict):\n defaults_dict = from_cscript\n else:\n log_normal(\"Deprecated cscript option_defaults method; replace with a dict\")\n defaults_dict = from_cscript()\n for k, v in defaults_dict.items():\n if not k in options:\n options[k] = v",
"def loadDefaults(self):\n # (025) Merged into settings.RawSettings.\n pass",
"def propagate_defaults(config_doc):\n for group_name, group_doc in config_doc.items():\n if isinstance(group_doc, dict):\n defaults = group_doc.get('defaults', {})\n\n for item_name, item_doc in group_doc.items():\n if item_name == 'defaults':\n continue\n if isinstance(item_doc, dict):\n\n group_doc[item_name] = \\\n dict_merge_pair(copy.deepcopy(defaults), item_doc)\n\n return config_doc",
"def default_settings(self, settings):\n return {}",
"def initDefaults(self, kwargs):\n \n for k,v in self.defaults.iteritems():\n if k in kwargs: # use assigned values\n setattr(self, k, kwargs[k])\n else: # use default values\n setattr(self, k, v)\n \n for k,v in kwargs.iteritems():\n if k not in self.defaults:\n setattr(self, k, v)\n pass",
"def add_defaults(variables, functions, case_sensitive):\r\n all_variables = dict(DEFAULT_VARIABLES)\r\n all_functions = dict(DEFAULT_FUNCTIONS)\r\n all_variables.update(variables)\r\n all_functions.update(functions)\r\n\r\n if not case_sensitive:\r\n all_variables = lower_dict(all_variables)\r\n all_functions = lower_dict(all_functions)\r\n\r\n return (all_variables, all_functions)"
] | [
"0.65865844",
"0.64090306",
"0.6406142",
"0.6250104",
"0.62078786",
"0.61736166",
"0.61570066",
"0.61391175",
"0.6111803",
"0.6052231",
"0.60497946",
"0.60248345",
"0.6000381",
"0.5997895",
"0.5956305",
"0.5923101",
"0.5910845",
"0.5898228",
"0.5872198",
"0.58597386",
"0.5825364",
"0.5786885",
"0.57715815",
"0.57346904",
"0.5719446",
"0.57046133",
"0.5702735",
"0.5689816",
"0.5684772",
"0.56713706"
] | 0.85035056 | 0 |
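A worked example of the merge behaviour described in the record above: defaults fill in keys that are missing from the user settings or whose value is None, while explicitly chosen values win. The function below is a standalone adaptation of the quoted _inject_defaults (it returns the defaults early when the user value is missing, so it runs outside the Settings class); the keys and values are purely illustrative.

def inject_defaults(settings, defaults):
    # defaults fill missing or None entries; explicit user values win;
    # nested dicts are merged recursively
    if defaults is None:
        return settings
    if not settings:
        return defaults
    merged = {}
    for k, v in settings.items():
        if isinstance(v, dict) or v is None:
            merged[k] = inject_defaults(v, defaults.get(k))
        else:
            merged[k] = v
    for k, v in defaults.items():
        if k not in settings:
            merged[k] = v
    return merged

user = {"mode": "fast", "log": {"level": None}}
defaults = {"mode": "slow", "retries": 3, "log": {"level": "info", "file": "run.log"}}
print(inject_defaults(user, defaults))
# {'mode': 'fast', 'log': {'level': 'info', 'file': 'run.log'}, 'retries': 3}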
create a Settings object. |settings| can be a dict or a path to a JSON file. If a dict, each value in |settings| must be a primitive (int, float, bool, str), a list, or a dict. |valid| must be a dict. |settings| represents the user settings, where each pair maps a setting name to the chosen setting value. |valid| represents all valid user settings, where each pair maps a setting name to its possible legal setting values. For example, the value associated with 'foo' must be either 'b' or 'a'. | def __init__(self, settings, valid, defaults=None):
try:
with open(settings, 'r') as settings_file:
self._settings = json.load(settings_file)
except TypeError:
self._settings = dict(settings)
self._settings = Settings._inject_defaults(self._settings, defaults)
Settings._validity_check(self._settings, valid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)",
"def _read_settings_file(cls, settings_path=''):\n if not settings_path:\n return {}\n\n if os.path.isdir(settings_path):\n settings_path = os.path.join(settings_path, '.' + cls.__name__)\n if not os.path.isfile(settings_path):\n return {}\n\n d = {} # returned\n try:\n with open(settings_path) as f:\n lines = f.readlines()\n except BaseException: # FileNotFoundError?!\n return d\n\n settings_dict = DecoSettingsMapping.get_deco_class_settings_dict(cls.__name__)\n for line in lines:\n line = line.strip()\n # Allow blank lines & comments\n if not line or line[0] == '#':\n continue\n\n try:\n setting, val_txt = line.split('=', 1) # only split at first '='\n except ValueError:\n # fail silently. (Or, TODO: report error? ill-formed line)\n continue # bad line\n setting = setting.strip()\n val_txt = val_txt.strip()\n\n if setting not in settings_dict or not val_txt:\n # fail silently. (Or, TODO: report error? ill-formed line)\n continue\n\n # special case: None\n if val_txt == 'None':\n if settings_dict[setting].allow_falsy:\n d[setting] = None\n continue\n\n # If val_txt is enclosed in quotes (single or double)\n # and ends in '=' (indirect value) then let val = val_txt;\n # otherwise, defer to settings_dict[setting].value_from_str\n is_indirect = (is_quoted_str(val_txt) and\n len(val_txt) >= 3 and\n val_txt[-2] == '=')\n if is_indirect:\n val = val_txt[1:-1] # remove quotes\n else:\n try:\n val = settings_dict[setting].value_from_str(val_txt)\n except ValueError as e:\n # fail silently. (Or, TODO: report error? bad value)\n continue # bad line\n\n d[setting] = val\n\n return d",
"def validate_settings(self, settings):\n pass",
"def parseSettings(settings_file):\n\t# Make a new settings object\n\tsetting_object = settings.Settings()\n\n\t# Read the file line by line\n\tfor line in settings_file:\n\t\tthis_line = line.split()\n\t\tif this_line == []:\n\t\t\tpass\n\t\telif this_line[0] == 'input':\n\t\t\tfor filename in this_line[1:]:\n\t\t\t\tsetting_object.addInput(filename)\n\t\telif this_line[0] == 'atom':\n\t\t\tsymbol = this_line[1]\n\t\t\tnumber = this_line[2]\n\t\t\tmass = this_line[3]\n\t\t\tcharge = this_line[4]\n\t\t\tsigma = this_line[5]\n\t\t\teps = this_line[6]\n\t\t\tsetting_object.addAtom(symbol, number, mass, charge, sigma, eps)\n\t\telif this_line[0] == 'mix':\n\t\t\tsetting_object.mix()\n\t\telif this_line[0] == 'bond':\n\t\t\tatom1 = this_line[1]\n\t\t\tatom2 = this_line[2]\n\t\t\tdistance = this_line[3]\n\t\t\tbond_length = this_line[4]\n\t\t\tforce_constant = this_line[5]\n\t\t\tsetting_object.addBond(atom1, atom2, distance, bond_length, force_constant)\n\t\telif this_line[0] == 'angle':\n\t\t\tatom1 = this_line[1]\n\t\t\tatom2 = this_line[2]\n\t\t\tatom3 = this_line[3]\n\t\t\tangle = this_line[4]\n\t\t\tangle_constant = this_line[5]\n\t\t\tsetting_object.addAngle(atom1, atom2, atom3, angle, angle_constant)\n\t\telif this_line[0] == 'molecule':\n\t\t\tresidue = this_line[1]\n\t\t\tnmol = this_line[2]\n\t\t\tnrexcl = this_line[3]\n\t\t\tsetting_object.addMolecule(residue, nmol, nrexcl)\n\t\telif this_line[0] == 'output':\n\t\t\toutput = this_line[1]\n\t\t\tsetting_object.addOutput(output)\n\t\telif this_line[0] == 'system':\n\t\t\tsystem = \"\".join(this_line[1:])\n\t\t\tsetting_object.addSystem(system)\n\t\telif this_line[0] == '#':\n\t\t\tpass\n\treturn setting_object",
"def from_settings(settings):",
"def process_settings(self, settings):\n default_settings = self.default_settings()\n\n processed_settings = {}\n\n for key, value in default_settings.items():\n if key in settings:\n processed_settings[key] = settings[key]\n else:\n processed_settings[key] = value\n\n self.validate_settings(processed_settings)\n\n return processed_settings",
"def loadSettings():\r\n try:\r\n settingsFile = open(sys.argv[1], \"r\")\r\n except IOError:\r\n logging.exception(\"Error opening settings.\")\r\n exitApp()\r\n \r\n settingStr = settingsFile.read()\r\n settingsFile.close()\r\n \r\n try:\r\n settings = json.loads(settingStr)\r\n except ValueError:\r\n logging.exception(\"Error parsing settings.\")\r\n exitApp()\r\n \r\n # Check integrity\r\n if (len(settings[\"reddit_username\"]) == 0):\r\n logging.critical(\"Reddit username not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_password\"]) == 0):\r\n logging.critical(\"Reddit password not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_subreddit\"]) == 0):\r\n logging.critical(\"Subreddit not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_ua\"]) == 0):\r\n logging.critical(\"Reddit bot user agent not set.\")\r\n exitApp()\r\n \r\n settings[\"repost_protection\"] = bool(settings[\"repost_protection\"])\r\n \r\n return settings",
"def set_from_dictionary(self, settings):\n for key, value in settings.items():\n if key in dir(self):\n setattr(self, key, value)\n continue\n msg = f'Invalid key value of {key} provided in dictionary of conversion settings'\n self.logger.warning(msg)\n if not config.silent:\n print(msg)",
"def settings() -> Settings:\n return Settings()",
"def process_settings(self, settings_file):\n int_keys = [ 'first_base_to_keep', 'last_base_to_keep', 'max_reads_to_split', 'minimum_reads_for_inclusion',\n 'pool_5trim', 'pool_3trim', 'min_post_adaptor_length']\n #float_keys = []\n str_keys = ['adaptor_sequence', 'rrna_index', 'genome_index', 'pool_append', 'pool_prepend', 'primer_sequence']\n boolean_keys = ['collapse_identical_reads', 'force_read_resplit', 'force_remapping', 'force_recollapse',\n 'force_recount', 'force_index_rebuild', 'force_retrim', 'trim_adaptor']\n list_str_keys = ['fastq_gz_files', 'sample_names']\n #list_float_keys = ['concentrations', 'input_rna']\n extant_files = ['pool_fasta',]\n config = ConfigParser.ConfigParser()\n config.read(settings_file)\n settings = {}\n for section in config.sections():\n for option in config.options(section):\n settings[option] = config.get(section, option)\n settings[section] = True\n for k in int_keys:\n settings[k] = int(settings[k])\n for k in str_keys:\n settings[k] = settings[k]\n #for k in float_keys:\n # settings[k] = float(settings[k])\n for k in boolean_keys:\n if not settings[k].lower() in ['true', 'false']:\n raise ValueError(\n 'Boolean value %s must be \"true\" or \"false\"' % k)\n settings[k] = settings[k].lower() == 'true'\n #for k in list_float_keys:\n # settings[k] = map(float, simplejson.loads(settings[k]))\n #for k in list_int_keys:\n # settings[k] = map(int, simplejson.loads(settings[k]))\n for k in list_str_keys:\n settings[k] = simplejson.loads(settings[k])\n self.fqdir = settings['fastq_dir']\n self.sample_names = settings['sample_names']\n self.fastq_gz_file_handles = [os.path.join(self.fqdir, fastq_gz_file) for fastq_gz_file in\n settings['fastq_gz_files']]\n for file_handle in self.fastq_gz_file_handles:\n assert tps_utils.file_exists(file_handle)\n for k in extant_files:\n assert tps_utils.file_exists(settings[k])\n self.settings = settings\n self.wdir = settings['working_dir']\n self.rdir = settings['results_dir']\n shutil.copy(settings_file, self.rdir)",
"def test_user_settings_serialization(self):\n\n # Construct a json representation of a UserSettings model\n user_settings_model_json = {}\n user_settings_model_json['language'] = 'testString'\n user_settings_model_json['notification_language'] = 'testString'\n user_settings_model_json['allowed_ip_addresses'] = '32.96.110.50,172.16.254.1'\n user_settings_model_json['self_manage'] = True\n\n # Construct a model instance of UserSettings by calling from_dict on the json representation\n user_settings_model = UserSettings.from_dict(user_settings_model_json)\n assert user_settings_model != False\n\n # Construct a model instance of UserSettings by calling from_dict on the json representation\n user_settings_model_dict = UserSettings.from_dict(user_settings_model_json).__dict__\n user_settings_model2 = UserSettings(**user_settings_model_dict)\n\n # Verify the model instances are equivalent\n assert user_settings_model == user_settings_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_settings_model_json2 = user_settings_model.to_dict()\n assert user_settings_model_json2 == user_settings_model_json",
"def settings(self, settings):\n if settings is None:\n raise ValueError(\"Invalid value for `settings`, must not be `None`\")\n\n self._settings = settings",
"def set_settings(self, **settings):\r\n for key in settings:\r\n if not key in self.DEFAULT_SETTINGS:\r\n raise ValueError(str(key) + \" is not a valid setting\")\r\n self.settings = {**self.settings, **settings}",
"def import_settings(path_to_settings=None):\n\n file_path = 'settings.json' if path_to_settings is None else path_to_settings\n\n if not os.path.isfile(file_path):\n # settings file doesn't exist\n raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), 'settings.json')\n\n with open(file_path) as in_file:\n data = json.load(in_file)\n settings = Settings()\n\n # required attributes, fail if missing\n try:\n settings.input_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['input_folder'], data['input_file'])\n settings.output_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['output_folder'], data['output_file'])\n settings.default_timezone = data['default_timezone']\n settings.output_timezone = data['output_timezone']\n settings.custom_column_headers = data.get('custom_column_headers', [])\n settings.app_id = data['app_id']\n except KeyError as e:\n print(\"Key not found in {}: \".format(file_path) + str(e))\n sys.exit(1)\n\n return settings",
"def save_settings(cls, project=None, user=None, settings=None):\n\n if (isinstance(settings, dict)):\n _to_update = settings\n if (\n isinstance(user, User)\n and isinstance(project, Project)\n ):\n _user_id = user.id\n _project_id = project.id\n elif (\n isinstance(user, (int, str,))\n and isinstance(project, (int, str,))\n ):\n _user_id = user\n _project_id = project\n else:\n raise TypeError\n cls.http_post(\n 'update_settings',\n json={\n 'project_preferences': {\n 'user_id': _user_id,\n 'project_id': _project_id,\n 'settings': _to_update,\n }\n }\n )\n else:\n raise TypeError",
"def SetSettings (self, settings) :\n\t\treturn self.run(\"SetSettings\", settings)",
"def create_settings():\n\n settings = {}\n\n settings['induction'] = {'type': 'DT'}\n\n settings['selection'] = {'type': 'Base',\n 'its': 1,\n 'param': 1}\n\n settings['prediction'] = {'type': 'MI',\n 'its': 0.1,\n 'param': 0.95}\n\n settings['queries'] = {}\n\n settings['metadata'] = {}\n\n settings['model_data'] = {}\n\n return settings",
"def _generate_settings(self):\n settings = {}\n settings[\"api_client_id\"] = input(\"(OPTIONAL) Please enter your Twitch API Client ID: \") #Get API Client ID first so I can use API to get user ID\n #Save JSON\n fileIO.save_json(\"settings.json\", settings)\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of your Twitch account: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n settings[\"userid\"] = userID\n settings[\"oauth\"] = input(\"Please enter the oauth token for your Twitch account: \")\n if settings[\"oauth\"].startswith(\"oauth:\"): #If the oauth token starts with oauth:, remove it\n settings[\"oauth\"] = settings[\"oauth\"][6:]\n settings[\"error_webhook\"] = input(\"Please enter the Discord WebHook URL you would like errors to be sent to: \")\n #Save JSON\n fileIO.save_json(\"settings.json\", settings)",
"def check_settings_syntax(settings_dict: dict, settings_metadata_dict: dict):\n try:\n f_root(**settings_dict)\n except ValidationError as e:\n msg = ''\n for error in e.errors():\n loc = error['loc']\n error_msg = \"Validation error for setting {}, bad value: {} (value origin: {})\\n\".format(\n '->'.join(str(x) for x in loc),\n get_pydantic_error_value(settings_dict, loc),\n settings_metadata_dict[loc[0]]\n )\n error_msg += \"Message: {}\\n\".format(error['msg'])\n msg += error_msg\n logger.error(msg)\n raise SettingsSyntaxError(msg)",
"def saveSettings(self,settings):\n settings = dict((LString(x),dict((LString(u),v) for u,v in y.iteritems())) \n for x,y in settings.iteritems())\n reComment = re.compile(';.*')\n reSection = re.compile(r'^\\[\\s*(.+?)\\s*\\]$')\n reSetting = re.compile(r'(.+?)\\s*=')\n #--Read init, write temp\n #self.ensureExists()\n path = GPath(self.path)\n iniFile = path.open('r')\n tmpFile = path.temp.open('w')\n section = sectionSettings = None\n for line in iniFile:\n stripped = reComment.sub('',line).strip()\n maSection = reSection.match(stripped)\n maSetting = reSetting.match(stripped)\n if maSection:\n section = LString(maSection.group(1))\n sectionSettings = settings.get(section,{})\n elif maSetting and LString(maSetting.group(1)) in sectionSettings:\n key = LString(maSetting.group(1))\n value = sectionSettings[key] \n if isinstance(value,str) and value[-1] == '\\n':\n line = value\n else:\n line = '%s=%s\\n' % (key,value)\n tmpFile.write(line)\n tmpFile.close()\n iniFile.close()\n #--Done\n path.untemp()",
"def set_settings(self, settings={}):\n # type: (dict) -> Entity\n if not settings:\n return\n\n # these are used to help with calculations\n t = ('auto', 'fixed')\n for v in ('position', 'size'):\n if v in settings:\n settings[v] = settings[v].lower()\n if settings[v] in t:\n self.settings[v] = settings[v]\n\n # these are inherent entity values\n for s in ['x', 'y', 'width', 'height']:\n self.type_def[s] = settings.get(s, 0)\n\n return self",
"def parse_settings(self, settings):\n if settings:\n for k, v in settings.iteritems():\n if k.startswith('SCRIPT_'):\n try:\n index = int(k.replace('SCRIPT_', '')) - 1\n self._script_names[index] = v\n except:\n pass\n\n if k.startswith('LINK_TYPE_') and v in LINK_TYPE_NAMES:\n try:\n index = int(k.replace('LINK_TYPE_', '')) - 1\n self._link_types[index] = LINK_TYPE_NAMES.index(v)\n except:\n pass\n\n if 'PUSH2' in self._script_names:\n task = partial(self.connect_script_instances, self.canonical_parent._control_surfaces())\n self.canonical_parent.schedule_message(50, task)",
"def initialize(cls, settings):\n\n settings_obj = SettingsService.load_game_conf()\n\n for entry in SettingsService.GAME_SETTINGS:\n value = settings_obj.get(SettingsService.GAME_SETTINGS_ROOT, {}).get(entry, None)\n if value is None:\n raise RuntimeError(f\"Entry {entry} is missing in settings.\")\n\n settings[entry] = float(value)\n\n return settings",
"def load_from_settings(self):\n for param, value in self.settings['swan'].items():\n # Some settings do not have a GUI element, continue if encountered\n if param not in self.input_elements.keys():\n continue\n\n # Check if parameter is not empty before filling in\n if self.validate_parameter(value):\n self.input_elements[param].set_value(value)\n\n # Validate\n self.validate(check_empty=False)",
"def initialize(cls, settings: Settings) -> Settings:\n\n settings_obj = SettingsService.load_game_conf()\n\n for entry in SettingsService.GAME_SETTINGS:\n value = settings_obj.get(SettingsService.GAME_SETTINGS_ROOT, {}).get(\n entry, None\n )\n if value is None:\n raise RuntimeError(f\"Entry {entry} is missing in settings.\")\n\n setattr(settings, entry, value)\n\n for entry in SettingsService.INITIALS:\n value = settings_obj.get(SettingsService.INITIALS_ROOT, {}).get(entry, None)\n if value is None:\n raise RuntimeError(f\"Entry {entry} is missing in settings.\")\n\n settings.initials[entry] = value\n\n return settings",
"def load_settings(filename=None):\n filename = filename or SETTINGS\n return common.open_and_read_file(filename, as_json=True)",
"def process_user_settings(user_settings=None, user_id=None, albums=None, avatar=None, banned=None, biography=None,\n bonus_points=None, cover_image=None, deleted=None, email=None, email_excluded=None,\n first_name=None, followers=None, following=None, href=None, images=None, kudos_given=None,\n kudos_received=None, kudos_weight=None, language=None, last_name=None, last_visit_time=None,\n location=None, login=None, messages=None, metrics=None, online_status=None, password=None,\n personal_data=None, public_images=None, rank=None, registration_data=None, reviews=None,\n roles=None, signature_topics=None, solutions_authored=None, sso_id=None,\n threads_participated=None, topics=None, user_badges=None, videos=None, view_href=None,\n web_page_url=None):\n default_settings = {\n 'id': user_id,\n 'albums': albums,\n 'avatar': avatar,\n 'banned': banned,\n 'biography': biography,\n 'bonus_points': bonus_points,\n 'cover_image': cover_image,\n 'deleted': deleted,\n 'email': email,\n 'email_excluded': email_excluded,\n 'first_name': first_name,\n 'followers': followers,\n 'following': following,\n 'href': href,\n 'images': images,\n 'kudos_given': kudos_given,\n 'kudos_received': kudos_received,\n 'kudos_weight': kudos_weight,\n 'language': language,\n 'last_name': last_name,\n 'last_visit_time': last_visit_time,\n 'location': location,\n 'login': login,\n 'messages': messages,\n 'metrics': metrics,\n 'online_status': online_status,\n 'password': password,\n 'personal_data': personal_data,\n 'public_images': public_images,\n 'rank': rank,\n 'registration_data': registration_data,\n 'reviews': reviews,\n 'roles': roles,\n 'signature_topics': signature_topics,\n 'solutions_authored': solutions_authored,\n 'sso_id': sso_id,\n 'threads_participated': threads_participated,\n 'topics': topics,\n 'user_badges': user_badges,\n 'videos': videos,\n 'view_href': view_href,\n 'web_page_url': web_page_url\n }\n # Use the default settings if settings are not explicitly defined\n if not user_settings:\n user_settings = default_settings\n\n # Overwrite any settings where fields are explicitly passed as arguments\n for field_name, field_value in default_settings.items():\n if default_settings.get(field_name):\n user_settings[field_name] = field_value\n\n # Ensure the User ID uses 'id' rather than 'user_id' as the field name\n if 'user_id' in user_settings and 'id' not in user_settings:\n user_settings['id'] = user_settings['user_id']\n del user_settings['user_id']\n return user_settings",
"def load_settings(self):\n settings_file = open('./resources/settings.json')\n settings = json.load(settings_file)\n settings_file.close()\n try:\n if settings['camera'] in self.camera_list:\n self.comboCamera.setCurrentIndex(settings['camera'])\n self.comboRotation.setCurrentIndex(settings['rotation'])\n self.spinMinHue.setValue(settings['colors']['min_hue'])\n self.spinMaxHue.setValue(settings['colors']['max_hue'])\n self.spinMinSaturation.setValue(settings['colors']['min_saturation'])\n self.spinMaxSaturation.setValue(settings['colors']['max_saturation'])\n self.spinMinValue.setValue(settings['colors']['min_value'])\n self.spinMaxValue.setValue(settings['colors']['max_value'])\n self.spinDiameter.setValue(settings['diameter'])\n self.lineEditLifter.setText(settings['lifter'])\n self.checkSaveVideo.setChecked(settings['save_video'])\n except KeyError:\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Error in settings.json. Loading defaults instead.')",
"def load_settings():\r\n if os.path.exists('settings.json'):\r\n json_data = open('settings.json').read()\r\n\r\n data = json.loads(json_data)\r\n return data\r\n else:\r\n return False",
"def _check_settings_validity(self, settings: list):\n\n if isinstance(settings, list):\n # if list is empty\n if not settings:\n raise ValueError('The given settings are an empty list, please make sure to add a dictionary with a key \\'CLF_NAME\\' and a corresponding classfier name as value. You can specify hyperparameters for the classifier with the key \\'HYPERPARAMS\\'.')\n \n # if not all entries in the list are of type dict raise an error\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in settings are expected to be of type \\'dict\\'.')\n\n for setting in settings:\n # if there is no CLF_NAME key in the dict of the setting entry raise an error\n if 'CLF_NAME' not in setting.keys():\n raise KeyError(f'Every entry in settings is required to have a \\'CLF_NAME\\' key, please make sure that this key exists in every entry in settings.')\n \n # get the classifier and its corresponding parameters\n classifier = self._get_classifier_to_name(setting['CLF_NAME'])\n\n # check if the classifier also has a predict_proba() function\n if not(hasattr(classifier,'predict_proba') and callable(getattr(classifier,'predict_proba'))):\n raise ValueError('')\n \n clf_params_keys = classifier.get_params().keys()\n\n # check if hyperparameters are given as list or as dict\n if 'HYPERPARAMS' in setting.keys():\n hyperparams = setting['HYPERPARAMS']\n\n # if given as list, all elements in the list must be of type dict\n if isinstance(hyperparams, list):\n # if hyperparameter list is empty\n if not hyperparams:\n raise ValueError('The given hyperparameters are an empty list, please make sure to add hyperparameters as \\'dict\\' where a key represents the parameter name and the value is the parameter value/values wrapped in a list.')\n\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in the settings hyperparameters are expected to be of type \\'dict\\'.')\n \n # loop through the dicts in HYPERPARAMS\n for hyperparams_entry in hyperparams:\n # for each dict check if the keys are valid paramters of the corresponding classifier\n for hyperparams_entry_key in hyperparams_entry.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_entry_value = hyperparams_entry[hyperparams_entry_key]\n \n if not isinstance(hyperparams_entry_value, list):\n raise TypeError(f'The hyperparameter {hyperparams_entry_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_entry_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n\n # if the parameter value list is empty\n if not hyperparams_entry_value:\n raise ValueError(f'Valuelist for hyperparameter {hyperparams_entry_key} is empty. 
Please specify values for the hyperparameter {hyperparams_entry_key} or remove it from HYPERPARAMS.')\n\n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparams_entry_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparams_entry_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n \n # if given as dict just check if the keys are valid paramters of the corresponding classifier\n elif isinstance(hyperparams, dict):\n for hyperparam_key in hyperparams.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_value = hyperparams[hyperparam_key]\n\n if not isinstance(hyperparams_value, list):\n raise TypeError(f'The hyperparameter {hyperparam_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n \n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparam_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparam_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n\n else:\n raise TypeError(f'Hyperparameters in settings must be either of type \\'dict\\' or \\'list\\', got type \\'{type(hyperparams).__name__}\\'')\n\n else:\n raise TypeError(f'Settings must be of type \\'list\\', passed settings are of type \\'{type(settings).__name__}\\'')"
] | [
"0.6769524",
"0.6685451",
"0.65674096",
"0.652653",
"0.6333047",
"0.62696064",
"0.6178105",
"0.6040261",
"0.59549224",
"0.591162",
"0.58818275",
"0.58598006",
"0.5854006",
"0.5809394",
"0.5806272",
"0.57916164",
"0.5766842",
"0.57582766",
"0.5749986",
"0.5732465",
"0.5693892",
"0.56398475",
"0.56206936",
"0.5592768",
"0.55727607",
"0.5570312",
"0.55660653",
"0.55596656",
"0.55558354",
"0.5538824"
] | 0.7713784 | 0 |
return the number of settings | def __len__(self):
return len(self._settings) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def number_of_sections(self):\n #print (len(self.config.sections()))\n return len(self.config.sections())",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = appfwlearningsettings()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def config_count(self) -> int:\n return pulumi.get(self, \"config_count\")",
"def __len__(self):\n #return len(self._tagged_values_dict)\n return len(list(self._visible_setting_names_gen))",
"def number_of_sections(self):\n sections = self.config.sections()\n return len(sections)",
"def count(self):\n return self.properties.get('count')",
"def n_conf(self):\n return self._configuration_sets[0].n_conf",
"def get_config_count():\n return jsonify(config_count=config_count(), config_limit=config_limit)",
"def n_configs(self):\n return self._faux._n_configs",
"def getNumberOfKeys(self) -> int:\n ...",
"def config_count():\n return int(len([name for name in os.listdir(nginx_sites_enabled) \\\n if os.path.isfile(os.path.join(nginx_sites_enabled, name))]))",
"def count(self):\n # TODO not implemented yet\n return 0",
"def get_num_items(self):\r\n return self.num_items",
"def count(self):\n return int()",
"def count(self):\n return int()",
"def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def config_manager_count(self):\n if \"configManagerCount\" in self._prop_dict:\n return self._prop_dict[\"configManagerCount\"]\n else:\n return None",
"def count(self):\r\n\r\n return len(self.widgets_list)",
"def count(self):\n return self.get_count()",
"def count(self):\n return self.size()",
"def get_count(self):\n\n\t\treturn self.__count",
"def get_count(self):\r\n return self.count",
"def count(self) -> int:\n return self.__count",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def get_control_count(cmd):\n return len(cmd.control_qubits)",
"def size() -> int:\n ..."
] | [
"0.73933834",
"0.73484594",
"0.7338341",
"0.7232637",
"0.69817024",
"0.6793652",
"0.6730186",
"0.66864556",
"0.6681215",
"0.66763896",
"0.66130507",
"0.6599068",
"0.65807015",
"0.65699047",
"0.65699047",
"0.65607816",
"0.6549674",
"0.65447354",
"0.651406",
"0.6512395",
"0.6506413",
"0.65033126",
"0.6501877",
"0.64730114",
"0.6472358",
"0.6472358",
"0.6472358",
"0.6472358",
"0.64703935",
"0.64259046"
] | 0.76182985 | 0 |
Push a single ElasticSearchObject to index. Assumes objects do NOT have an id. | def push(self, es_obj, doc_type=None, refresh=True):
doc_type, es_repr = self._validate_doc_and_get_type_and_repr(es_obj, doc_type)
response = self.conn.elastic_search_client.index(index=self.index_name, doc_type=doc_type,
body=es_repr, refresh=u'true' if refresh else u'false', id=None)
logger.debug("Response: {}".format(response))
print(response)
if '_id' not in response:
logger.error("Could not create object")
logger.error("Object: {}".format(es_obj))
logger.error("Es_repr: {}".format(es_repr))
logger.error("Response: {}".format(response))
return None
id = response['_id']
return id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_object(self, content, object_id = None):\n if object_id is None:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s\" % self.url_index_name, self.client.timeout, content)\n else:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % object_id).encode('utf8'), safe='')), self.client.timeout, content)",
"def push(self, obj):\n pass",
"def save_object(self, obj):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % obj[\"objectID\"]).encode('utf8'), safe='')), self.client.timeout, obj)",
"def addobj(self, obj):\n self._objslock.acquire()\n if obj.objid in self._objs:\n self._objslock.release()\n raise KeyError(\"non-unique EMANE object id %s for %s\" % (obj.objid, obj))\n self._objs[obj.objid] = obj\n self._objslock.release()",
"def add_object(self, obj):\n\t\tself.objects.append(obj)",
"def add(self, obj):\n self.session.add(obj)",
"def add_object(self, obj: str):\n if obj not in self._objects:\n self._objects.append(obj)\n else:\n raise IDAlreadyExists",
"def add(self, idx, obj):\n if idx in self._objects:\n warning('overriding object %s - use update() instead?' % idx)\n self._objects[idx] = obj\n self._last_insert_idx = idx",
"def insert(self, index, p_object): # real signature unknown; restored from __doc__\n pass",
"def insert_object(self, object: ObjectHandle):\n # Serialize the object descriptor and data part. Both items are stored\n # as separate objects.\n descriptor, data = self.factory.serialize(object)\n object_id = self.store.write_object(descriptor)\n data_id = self.store.write_object(data)\n # Add the object information to the index and write the modified index\n # to the data store.\n self.index[object.namespace][object.name] = StoredObject(\n object_id=object_id,\n data_id=data_id,\n name=object.name,\n descriptor=descriptor\n )\n self._write_index()\n # If the object refers to a default object that object is removed since\n # it has been overwritten by the new object.\n try:\n del self.defaults.get(object.namespace, {})[object.name]\n except KeyError:\n pass",
"def add_object(self, object):\n object.save()",
"def add(self, obj):\n self.getSession().add(obj)\n self.commit() # paranoially\n return obj",
"def add_object(_object):\n print('add_object: ' + str(_object))\n try_insert_or_update(\n models.objects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n project_id=_object['project_id'], filename=_object['filename'])])",
"def bulk_push_to_elastic(elastic_search_url, index, docs):\n CREATE_TEMPLATE = {\"create\": {\"_index\": index, \"_type\": \"_doc\", \"_id\": \"\"}}\n\n bulk_request_body = \"\"\n for doc in docs:\n CREATE_TEMPLATE[\"create\"][\"_id\"] = doc[HASH_ID]\n bulk_request_body += json.dumps(CREATE_TEMPLATE) + NEW_LINE\n bulk_request_body += json.dumps(doc) + NEW_LINE\n\n # Request\n headers = {\"content-type\": \"application/x-ndjson\"}\n\n url = elastic_search_url + \"/\" + \"_bulk\"\n\n response = requests.post(url, data=bulk_request_body, headers=headers)\n return response",
"def _add_stix_object(self, stix_object: _Observable):\n if stix_object.id in self._all_objects:\n if len(stix_object.serialize()) > len(self._all_objects[stix_object.id].serialize()):\n self._all_objects[stix_object.id] = stix_object\n else:\n self._all_objects[stix_object.id] = stix_object",
"def add(self, object):\n self.lock.acquire()\n self.__Session.add(object)\n self.__Session.commit()\n self.lock.release()",
"def store_object(self, _object):\n\n # replace an existing list member, else, append\n\n index = [self.object_store.index(_object_) for _object_ in self.object_store if _object_.LocalID == _object.LocalID]\n\n if index != []:\n\n self.object_store[index[0]] = _object\n\n #if self.settings.LOG_VERBOSE: logger.debug('Updating a stored object: %s in region \\'%s\\'' % (_object.FullID, self.region.SimName))\n\n else:\n\n self.object_store.append(_object)\n\n #if self.settings.LOG_VERBOSE: logger.debug('Stored a new object: %s in region \\'%s\\'' % (_object.LocalID, self.region.SimName))",
"def add_object(self, obj):\n self._objects.append(obj)",
"def index_fobj(fobj):\n doc = fileobject_to_dict(fobj)\n if doc is not None:\n #print doc\n SOLR.add(doc)\n else:\n pass",
"def upsert(self, obj):\r\n url = '{0}/upsert'.format(self.get_url())\r\n request = http.Request('PUT', url, self.wrap_object(obj))\r\n\r\n return request, parsers.parse_empty",
"def add(self, obj: object) -> None:\n self._contains.append(obj)",
"def create_document(obj):\n index = obj.get_index_name()\n doc_type = obj.get_document_type()\n body = obj.get_document_body()\n exists = ES.exists(index=index, doc_type=doc_type, id=obj.pk)\n\n if not exists:\n ES.create(index=index, doc_type=doc_type, body=body, id=obj.pk)\n return None\n\n return \"Conflict: document already exists for {0} with id {1}.\".format(\n obj.__class__.__name__, obj.pk)",
"def add(self, obj: T) -> None:\n self._items.append(obj)\n self._size += 1",
"def post(body):\n es = Elasticsearch([ELASTIC_SEARCH_HOST], http_auth=ELASTIC_SEARCH_AUTH, scheme=\"https\", port=ELASTIC_SEARCH_PORT)\n\n # Create Index If not present on host\n if not es.indices.exists('newdata'):\n es.indices.create('newdata')\n\n # Create Document in index\n entry = es.index(index='newdata', doc_type=body[\"data\"][\"type\"], body=body[\"data\"][\"attributes\"])\n response = dict(data=dict(id=entry[\"_id\"], type=entry[\"_type\"], attributes=dict(index=entry[\"_index\"])))\n return response, 201",
"def add(self, obj):\n raise NotImplementedError",
"def append(self, obj):\r\n self.record_count += 1\r\n \r\n if type(obj) == dict:\r\n self._probe_record(obj)\r\n else:\r\n self._probe_row(obj)",
"def _add_object(self, object_dict):\n # Attempt to map the object first. This will raise an\n # ItemExistsError if a named object of the same type already\n # exists.\n self._add_object_to_map(self.append_key, object_dict)\n\n # Add the object to the end of the model.\n # TODO: which objects need added to the beginning?\n self.model_dict[self.append_key] = object_dict\n\n # Update append key.\n self._update_append_key()",
"def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)",
"def put(cls, obj):\n return cls(DaskWrapper.put(obj, hash=False), len(obj.index), len(obj.columns))",
"def new(self, obj):\n\n self.__session.add(obj)"
] | [
"0.70250475",
"0.66799045",
"0.6536268",
"0.6320226",
"0.62905204",
"0.6278442",
"0.6247867",
"0.62353545",
"0.6212382",
"0.6159699",
"0.60891485",
"0.60850763",
"0.6084126",
"0.6069489",
"0.6048055",
"0.601098",
"0.601018",
"0.59969294",
"0.59266204",
"0.5904166",
"0.5878974",
"0.5859077",
"0.5854742",
"0.58343935",
"0.5832538",
"0.58026946",
"0.57928",
"0.5735954",
"0.5733622",
"0.57309926"
] | 0.72160566 | 0 |
Returns the text of a child node found by name. Only one such named child is expected. | def getSingleChildTextByName(rootNode, name):
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_child(self, name):\n for n in self.children:\n if n.name == name:\n return n\n\n raise ChildError(\"Can't find child node '{name}'\".format(**locals()))",
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child",
"def get_child(self, name):\n return name, self._children[name]",
"def get_child(self, name):\n return next((x for x in self.children if x.name == name), None)",
"def getSingleChildTextByNameNS(rootNode, ns, name):\n try:\n nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None\n except AttributeError:\n return None",
"def findChild(self, name):\n\n # Note - this returns the first child of the given name\n # any other children with similar names down the tree\n # is not considered.\n \n for child in self.getAllChildren():\n if child.getName() == name:\n return child",
"def visit_name(self, node, children):\n name = ''.join(children)\n return name",
"def getChild(self, name):\n \n for child in self._children:\n if child.getName() == name:\n return child",
"def from_name(self, name: str) -> t.Sequence[etree.Element]:\n return self.child_tags[name]",
"def getSingleChildByName(rootNode, name):\n nodeList = [e for e in rootNode.childNodes if e.localName == name]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None",
"def get_xml_node_value (root, name):\n node = root.find(name)\n\n if not node:\n return None\n elif len(node.text) < 1:\n return None\n else:\n return node.text.strip()",
"def add_child(self, name):\n\n if self._text is not None:\n raise MarshallingError(f'Cannot add the child \"{name}\" the text element \"{self._name}\"')\n\n child = Element(name)\n self._children.append(child)\n return child",
"def get_name(self):\n return self.children[0]",
"def get_node_text(self, node):\n text_children = [n.nodeValue for n in self.get_node_children(node)\n if n.nodeType == xml.dom.Node.TEXT_NODE]\n if text_children:\n return ''.join(text_children)\n else:\n return None",
"def subNode(self, name):\n for nd in self.kids:\n if nd.name == name:\n return nd\n raise LookupError( 'name not found \"' + name + '\"' )",
"def test_getter_child_text(self):\n root = netapp_api.NaElement('root')\n root.set_content('FAKE_CONTENT')\n self.mock_object(root, 'get_child_by_name', return_value=root)\n\n self.assertEqual('FAKE_CONTENT',\n root.__getitem__('root'))",
"def find_node(self, name):\n for node in self.get_children():\n if node.read_name() == name:\n break\n else:\n node = None\n return node",
"def get_node_by_name(self, name):\r\n root = self.get_xml_root()\r\n return root.find(name)",
"def getChildrenByName(rootNode, name):\n return [e for e in rootNode.childNodes if e.localName == name]",
"def _get_child_text(node):\n for child in node.children:\n if isinstance(child, NavigableString) and not isinstance(child, Comment):\n yield child.split()",
"def get_child_data(node, tag_name, default = None):\n n = first_child(node, tag_name)\n if n and n.firstChild:\n return n.firstChild.data\n else:\n return default",
"def get_name_value(name_node):\n return name_node.id",
"def find_named_node(graph, name):\n children = []\n\n for child in _iter_nested_children(graph):\n if isinstance(child, (tree.Name, tree.Function)):\n if hasattr(child, \"name\"):\n name_ = child.name\n else:\n name_ = child.value\n\n if name_ == name:\n children.append(child)\n\n if not children:\n return None\n\n return children[-1]",
"def first_child(tree, name, recurse = False):\n name = name.lower()\n if not tree.hasChildNodes():\n return None\n for child in tree.childNodes:\n if child.nodeType != child.ELEMENT_NODE:\n continue\n if child.tagName.lower() == name:\n return child\n return None",
"def get_child(self, character):\n if self.has_child(character):\n index = self._get_index(character.upper())\n return self.children[index]\n else:\n raise ValueError(f'No child exists for character {character!r}')",
"def __getChildViewText(self, parentId, childSeq):\n child_view = self.__getChildView(parentId, childSeq)\n if child_view:\n printLog(self.threadName + '[__getChildViewText] found child view of parent %s ' % parentId)\n # np = child_view.namedProperties\n # print np\n # return np.get('text:mText').value.encode(sys.getdefaultencoding())\n return child_view.getText()\n else:\n printLog(self.threadName + '[__getChildViewText] view not found.', logging.ERROR)\n self.resultFlag = False\n return ''",
"def get(self, node_name, aslist=False):\n offset = 0\n if re.search(\"\\[\", node_name):\n node_name, tmpstr = node_name.split(\"[\")\n offset = int(tmpstr.split(\"]\")[0])\n rtnData = self.data.findAll(node_name)\n if aslist:\n return rtnData\n else:\n if len(rtnData) == 0:\n return \"\"\n return rtnData[offset].text",
"def getNode(node, name):\n out = filter( lambda n: n.nodeType == n.ELEMENT_NODE and n.tagName == name,\n node.childNodes )\n if len(out) != 1:\n raise NodeError(name, node)\n return out[0]",
"def node(self, name):\r\n return self.nodes[name]",
"def __getitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.text\n raise KeyError(name)"
] | [
"0.73368084",
"0.727436",
"0.709972",
"0.69880795",
"0.6772942",
"0.66547203",
"0.6558581",
"0.6543391",
"0.63135356",
"0.6266445",
"0.61968386",
"0.6183661",
"0.6125282",
"0.610355",
"0.6097007",
"0.60358995",
"0.6027794",
"0.6010082",
"0.5985832",
"0.5980697",
"0.5976921",
"0.5957283",
"0.5938346",
"0.58730257",
"0.5850388",
"0.5829589",
"0.5827065",
"0.5824994",
"0.5740648",
"0.57354015"
] | 0.7597116 | 0 |
Returns the text of a child node found by name and namespaceURI. Only one such named child is expected. | def getSingleChildTextByNameNS(rootNode, ns, name):
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child",
"def getSingleChildTextByName(rootNode, name):\n try:\n nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None\n except AttributeError:\n return None",
"def get_child(self, name):\n for n in self.children:\n if n.name == name:\n return n\n\n raise ChildError(\"Can't find child node '{name}'\".format(**locals()))",
"def test_getter_child_text(self):\n root = netapp_api.NaElement('root')\n root.set_content('FAKE_CONTENT')\n self.mock_object(root, 'get_child_by_name', return_value=root)\n\n self.assertEqual('FAKE_CONTENT',\n root.__getitem__('root'))",
"def get_xml_node_value (root, name):\n node = root.find(name)\n\n if not node:\n return None\n elif len(node.text) < 1:\n return None\n else:\n return node.text.strip()",
"def getSingleChildByNameNS(rootNode, ns, name):\n nodeList = [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None",
"def get_child(self, name):\n return name, self._children[name]",
"def get_node_text(self, node):\n text_children = [n.nodeValue for n in self.get_node_children(node)\n if n.nodeType == xml.dom.Node.TEXT_NODE]\n if text_children:\n return ''.join(text_children)\n else:\n return None",
"def getSingleChildByName(rootNode, name):\n nodeList = [e for e in rootNode.childNodes if e.localName == name]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None",
"def _get_child_text(node):\n for child in node.children:\n if isinstance(child, NavigableString) and not isinstance(child, Comment):\n yield child.split()",
"def get_child_data(node, tag_name, default = None):\n n = first_child(node, tag_name)\n if n and n.firstChild:\n return n.firstChild.data\n else:\n return default",
"def get_child(self, name):\n return next((x for x in self.children if x.name == name), None)",
"def visit_name(self, node, children):\n name = ''.join(children)\n return name",
"def get_name_value(name_node):\n return name_node.id",
"def get_name(self):\n return self.children[0]",
"def getChild(self, *args):\n return _libsbml.XMLNode_getChild(self, *args)",
"def getChildrenByNameNS(rootNode, ns, name):\n return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]",
"def newTextChild(self, ns, name, content):\n if ns is None: ns__o = None\n else: ns__o = ns._o\n ret = libxml2mod.xmlNewTextChild(self._o, ns__o, name, content)\n if ret is None:raise treeError('xmlNewTextChild() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def getChildElementValueByTagName(element: org.w3c.dom.Element, string: str) -> str:\n ...",
"def get_text(self, xml, name, nodetype):\n nodes = xml.getElementsByTagName(\"wp:comment_\" + name)[0].childNodes\n return \"\".join([n.data for n in nodes if n.nodeType == nodetype])",
"def add_child(self, name):\n\n if self._text is not None:\n raise MarshallingError(f'Cannot add the child \"{name}\" the text element \"{self._name}\"')\n\n child = Element(name)\n self._children.append(child)\n return child",
"def __get_node_name (self):\n import pyxb.namespace\n return pyxb.namespace.ExpandedName(self.node.namespaceURI, self.node.localName)",
"def __getChildViewText(self, parentId, childSeq):\n child_view = self.__getChildView(parentId, childSeq)\n if child_view:\n printLog(self.threadName + '[__getChildViewText] found child view of parent %s ' % parentId)\n # np = child_view.namedProperties\n # print np\n # return np.get('text:mText').value.encode(sys.getdefaultencoding())\n return child_view.getText()\n else:\n printLog(self.threadName + '[__getChildViewText] view not found.', logging.ERROR)\n self.resultFlag = False\n return ''",
"def get_text(self):\n c=self.xpath_eval(\"ns:*\")\n if not c:\n self.upgrade()\n t=self.xpath_eval(\"ns:text\")\n if not t:\n return None\n return from_utf8(t[0].getContent())",
"def getChildrenByName(rootNode, name):\n return [e for e in rootNode.childNodes if e.localName == name]",
"def getNode(node, name):\n out = filter( lambda n: n.nodeType == n.ELEMENT_NODE and n.tagName == name,\n node.childNodes )\n if len(out) != 1:\n raise NodeError(name, node)\n return out[0]",
"def get_node_text(self):\n return self.node_text",
"def getvalueofnode(node):\r\n return node.text if node is not None else None",
"def subNode(self, name):\n for nd in self.kids:\n if nd.name == name:\n return nd\n raise LookupError( 'name not found \"' + name + '\"' )",
"def getChild(self, name):\n \n for child in self._children:\n if child.getName() == name:\n return child"
] | [
"0.70851547",
"0.70435005",
"0.6444718",
"0.6246965",
"0.6225065",
"0.61497194",
"0.60729676",
"0.59668297",
"0.5915806",
"0.58188534",
"0.5817289",
"0.58145833",
"0.5802411",
"0.57501686",
"0.5727612",
"0.57120925",
"0.5695159",
"0.5669567",
"0.5641456",
"0.5641404",
"0.56108254",
"0.55785763",
"0.55626947",
"0.55567914",
"0.5504589",
"0.5459198",
"0.544007",
"0.54091156",
"0.5408074",
"0.54005533"
] | 0.7306041 | 0 |
Returns a descendent node found by a list of names and namespaceURIs forming a path. The path is expected to define a unique node. | def getSingleChildByPathNS(rootNode, path):
parentNode = rootNode
for (ns, name) in path:
node = getSingleChildByNameNS(parentNode, ns, name)
if node == None:
return None
else:
parentNode = node
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ParsePath(p_names: Iterable[Text]) -> gnmi_pb2.Path:\n gnmi_elems = []\n for word in p_names:\n word_search = _RE_PATH_COMPONENT.search(word)\n if not word_search: # Invalid path specified.\n raise XpathError('xpath component parse error: %s' % word)\n if word_search.group('key') is not None: # A path key was provided.\n tmp_key = {}\n for x in re.findall(r'\\[([^]]*)\\]', word):\n tmp_key[x.split('=')[0]] = x.split('=')[-1]\n gnmi_elems.append(gnmi_pb2.PathElem(name=word_search.group(\n 'pname'), key=tmp_key))\n else:\n gnmi_elems.append(gnmi_pb2.PathElem(name=word, key={}))\n return gnmi_pb2.Path(elem=gnmi_elems)",
"def _parse_path(p_names):\n gnmi_elems = []\n for word in p_names:\n word_search = _RE_PATH_COMPONENT.search(word)\n if not word_search: # Invalid path specified.\n raise XpathError('xpath component parse error: %s' % word)\n if word_search.group('key') is not None: # A path key was provided.\n tmp_key = {}\n for x in re.findall(r'\\[([^]]*)\\]', word):\n tmp_key[x.split(\"=\")[0]] = x.split(\"=\")[-1]\n gnmi_elems.append(gnmi_pb2.PathElem(name=word_search.group(\n 'pname'), key=tmp_key))\n else:\n gnmi_elems.append(gnmi_pb2.PathElem(name=word, key={}))\n return gnmi_pb2.Path(elem=gnmi_elems)",
"def subNode(self, name):\n for nd in self.kids:\n if nd.name == name:\n return nd\n raise LookupError( 'name not found \"' + name + '\"' )",
"def nameToNode(name):\n\n pass",
"def node_find_by_name( fdt, node_name, starting_node = 0, multi_match=False ):\n\n matching_nodes = []\n matching_node = None\n\n search_active = False\n if starting_node == \"/\" or starting_node == 0:\n search_active = True\n\n for node in fdt.node_iter():\n if not search_active:\n if node.path == starting_node:\n search_active = True\n\n if search_active:\n if node.name == node_name:\n if not matching_nodes:\n matching_node = node\n matching_nodes.append( node )\n\n return matching_node, matching_nodes",
"def lookup_element(self, name: str) -> ElementNSEntry:\n for i, scope in enumerate(reversed(self.element_ns_stack)):\n if name in scope:\n el, parent_def = scope[name]\n if i == 0:\n # Return anything from local namespace\n return (el, parent_def)\n elif isinstance(el, comp.Signal):\n # Signals are allowed to be found in parent namespaces\n return (el, parent_def)\n elif self.parent_parameters_visible and isinstance(el, Parameter):\n # Parameters are allowed to be found in parent namespaces,\n # except in some contexts\n return (el, parent_def)\n return (None, None)",
"def getSingleChildByPath(rootNode, path):\n parentNode = rootNode\n for name in path:\n node = getSingleChildByName(parentNode, name)\n if node == None:\n return None\n else:\n parentNode = node\n return node",
"def _path_names_to_edges(self, node_names):\n cur_node = self.root\n edge_path = []\n for node_name in node_names:\n next_node = None\n for edge in self.edges_from(cur_node.id):\n if self.nodes[edge.dst].name == node_name:\n edge_path.append(edge)\n next_node = self.nodes[edge.dst]\n break\n if next_node is None:\n raise Exception(\"No edge found from {0} to {1}\".format(cur_node.name, node_name))\n else:\n cur_node = next_node\n return edge_path",
"def dig(node, *subElements):\n if not node:\n return None\n for name in subElements:\n nextNode = None\n for child in node.childNodes:\n if child.nodeType == child.ELEMENT_NODE and child.nodeName == name:\n nextNode = child\n break\n if nextNode:\n node = nextNode\n else:\n return None\n return node",
"def ex_get_node_by_name(self, name):\n domain = self._get_domain_for_name(name=name)\n node = self._to_node(domain=domain)\n return node",
"def nodeFromName(self, name):\n for item in self.items():\n if isinstance(item, NodeItem):\n if item.name() == name:\n return item\n return None",
"def resolutionNode(*args, name: AnyStr=\"\", parent: AnyStr=\"\", shared: bool=True, skipSelect:\n bool=True, **kwargs)->AnyStr:\n pass",
"def domFindElementByPath( node, astrElementPathName ):\n \"\"\"eg: [\"starting-condition\", \"condition\", \"script_type\"] \"\"\"\n element = node;\n for name in astrElementPathName:\n element = domFindElement( element, name );\n if( element == None ):\n return None;\n return element;",
"def locate_last_node(self, name):\n name = name.toUri()\n path = self.name_to_path(name)\n # create a cypher query to match the path\n try:\n query = self.create_path_query(path, 'MATCH')\n except UnsupportedQueryException as ex:\n print 'Error: extract_from_repo: %s' % str(ex)\n\n records = neo4j.CypherQuery(self.db_handler, query).execute()\n if not records:\n return None\n # in the name tree there should be AT MOST one match for a \n # given name prefix\n assert(len(records.data) == 1)\n assert(len(records.data[0].values) == 1)\n last_node = records.data[0].values[0]\n\n return last_node",
"def create_namespace_tree(dotted_names):\r\n ret = {}\r\n for dn in dotted_names:\r\n path = dn.split('.')\r\n for i in xrange(len(path)):\r\n ns = '.'.join(path[:i])\r\n itempath = '.'.join(path[:i + 1])\r\n if ns not in ret:\r\n ret[ns] = []\r\n if itempath not in ret[ns]:\r\n ret[ns].append(itempath)\r\n return ret",
"def derive_path(self, path):\n next_node = self\n for identifier in path:\n next_node = next_node.derive_one(identifier)\n\n return next_node",
"def resolve(self,nameseq):\n assert(is_seq(nameseq) and len(nameseq) >= 1)\n if len(nameseq) > 1:\n return self.nodes()[nameseq[0]].resolve(nameseq[1:])\n else:\n return self.nodes()[nameseq[0]]",
"def getChildrenByNameNS(rootNode, ns, name):\n return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]",
"def generateElementsQNamed(list, name, uri):\n for n in list:\n if IElement.providedBy(n) and n.name == name and n.uri == uri:\n yield n",
"def node_lookup_bulk(self, paths):\n\n placeholders = ','.join('?' for path in paths)\n q = \"select node from nodes where path in (%s)\" % placeholders\n self.execute(q, paths)\n r = self.fetchall()\n if r is not None:\n return [row[0] for row in r]\n return None",
"def test_compute_qname(self):\n g = Graph()\n self.assertEqual(\n g.compute_qname(URIRef(\"http://foo/bar/baz\")),\n (\"ns1\", URIRef(\"http://foo/bar/\"), \"baz\"),\n )\n\n self.assertEqual(\n g.compute_qname(URIRef(\"http://foo/bar#baz\")),\n (\"ns2\", URIRef(\"http://foo/bar#\"), \"baz\"),\n )\n\n # should skip to ns4 when ns3 is already assigned\n g.bind(\"ns3\", URIRef(\"http://example.org/\"))\n self.assertEqual(\n g.compute_qname(URIRef(\"http://blip/blop\")),\n (\"ns4\", URIRef(\"http://blip/\"), \"blop\"),\n )\n\n # should return empty qnames correctly\n self.assertEqual(\n g.compute_qname(URIRef(\"http://foo/bar/\")),\n (\"ns1\", URIRef(\"http://foo/bar/\"), \"\"),\n )\n\n # should compute qnames of URNs correctly as well\n self.assertEqual(\n g.compute_qname(URIRef(\"urn:ISSN:0167-6423\")),\n (\"ns5\", URIRef(\"urn:ISSN:\"), \"0167-6423\"),\n )\n\n self.assertEqual(\n g.compute_qname(URIRef(\"urn:ISSN:\")),\n (\"ns5\", URIRef(\"urn:ISSN:\"), \"\"),\n )\n\n # should compute qnames with parenthesis correctly\n self.assertEqual(\n g.compute_qname(URIRef(\"http://foo/bar/name_with_(parenthesis)\")),\n (\"ns1\", URIRef(\"http://foo/bar/\"), \"name_with_(parenthesis)\"),\n )",
"def getSingleChildByNameNS(rootNode, ns, name):\n nodeList = [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None",
"def get_node_by_full_object(self, name):\n return self.get_node_paths_by_full_object(name)[-1]",
"def name_to_node(name):\n selectionList = MSelectionList()\n selectionList.add(name)\n node = MObject()\n selectionList.getDependNode(0, node)\n return node",
"def traverse(name, furtherPath):",
"def xpath (entry, path):\n if isinstance (path, str):\n path = path.split('/')\n result=entry\n for key in path: result=result[key]\n return result",
"def get_subtree(self, names, keep_root_branch=False):\n nodes = self.get_nodes(names)\n rca = self.get_recent_common_ancestor(nodes)\n return self.get_node_subtree(rca, keep_root_branch)",
"def find_named_node(graph, name):\n children = []\n\n for child in _iter_nested_children(graph):\n if isinstance(child, (tree.Name, tree.Function)):\n if hasattr(child, \"name\"):\n name_ = child.name\n else:\n name_ = child.value\n\n if name_ == name:\n children.append(child)\n\n if not children:\n return None\n\n return children[-1]",
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child",
"def get_node_by_name(self, name):\r\n root = self.get_xml_root()\r\n return root.find(name)"
] | [
"0.5617993",
"0.5379932",
"0.5344396",
"0.52256453",
"0.5165489",
"0.5163145",
"0.5140036",
"0.49797037",
"0.49483255",
"0.49193937",
"0.4904224",
"0.4882593",
"0.48682582",
"0.48599708",
"0.48452106",
"0.482554",
"0.47848898",
"0.4771717",
"0.47439066",
"0.47428063",
"0.47381613",
"0.47122383",
"0.46929064",
"0.467712",
"0.46567836",
"0.46559966",
"0.462799",
"0.46255848",
"0.46238422",
"0.45962024"
] | 0.5693317 | 0 |
Returns all child nodes of a specified name. | def getChildrenByName(rootNode, name):
return [e for e in rootNode.childNodes if e.localName == name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findChildren(self, name):\n\n # Note: this returns a list of all the children of a given\n # name, irrespective of the depth of look-up.\n \n children = []\n \n for child in self.getAllChildren():\n if child.getName() == name:\n children.append(child)\n\n return children",
"def getElements(self, name=\"\"):\n\n if not name:\n return self.children\n else:\n elements = []\n for element in self.children:\n if element.name == name:\n elements.append(element)\n return elements",
"def getChildrenByNameNS(rootNode, ns, name):\n return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]",
"def get_children(self):\n return NodeList(self._my_map['childNodes'])",
"def get_child_nodes(node):\r\n return list(iter_child_nodes(node))",
"def getAllChildrenWithTagName(elm, tagname):\n if elm.hasChildNodes() == True:\n elms = []\n for c in elm.childNodes:\n if c.nodeType == Node.ELEMENT_NODE and c.tagName == tagname:\n elms.append(c)\n return elms\n else:\n return None",
"def from_name(self, name: str) -> t.Sequence[etree.Element]:\n return self.child_tags[name]",
"def getChildNodes( self, path ):\n\n return self.db.childNodes( path )",
"def get(node: md.Document, name: str) -> mc.Nodelist:\n return node.getElementsByTagName(name)",
"def get_children(self, table_name):\n return self._child_map[table_name]",
"def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]",
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child",
"def find_children(self, name, recursive=True) -> Sequence['Component']:\n return ()",
"def findAll(self, name=None, attrs={}, recursive=True, text=None,\r\n limit=None, **kwargs):\r\n generator = self.recursiveChildGenerator\r\n if not recursive:\r\n generator = self.childGenerator\r\n return self._findAll(name, attrs, text, limit, generator, **kwargs)",
"def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children",
"def get_node_children(self, node):\n return node.children",
"def children(node):\n\n return snd(node)",
"def get_children_elements(self):\n\n pass",
"def get_by_tag(self, name):\n return [XmlWrapper(i) for i in self.node.getElementsByTagName(name)]",
"def get_by_tag(self, name):\n return [XmlWrapper(i) for i in self.node.getElementsByTagName(name)]",
"def get_child_znodes(cluster_name, path):\n zoo_client = ZookeeperService.get_zoo_client(cluster_name)\n child_znodes = []\n\n children = zoo_client.get_children(path)\n # iter child nodes and convert to dict with extra info\n for child in children:\n child_path = os.path.join(path, child)\n data, _ = zoo_client.get(child_path)\n # node\n node = {\"path\": child_path, \"value\": data}\n node[\"name\"] = child_path.rsplit('/', 1)[-1]\n child_znodes.append(node)\n return child_znodes",
"def getChildElements(doc):\n for child in doc.childNodes:\n if child.nodeType == child.ELEMENT_NODE:\n yield child",
"def getchildren(self):\n return self.root.getchildren()",
"def get_children(self):\n\n pass",
"def getChild(self, name):\n \n for child in self._children:\n if child.getName() == name:\n return child",
"def findChildren(widget=None, name=\"\", text=\"\"):\n\t\t# TODO: figure out why the native QWidget.findChildren method\n\t\t# does not seem to work from PythonQt\n\t\tif not widget:\n\t\t\twidget = mainWindow()\n\t\tchildren = []\n\t\tparents = [widget]\n\t\twhile parents != []:\n\t\t\tp = parents.pop()\n\t\t\tparents += p.children()\n\t\t\tif name and p.name.find(name) >= 0:\n\t\t\t\tchildren.append(p)\n\t\t\telif text:\n\t\t\t\ttry:\n\t\t\t\t\tp.text\n\t\t\t\t\tif p.text.find(text) >= 0:\n\t\t\t\t\t\tchildren.append(p)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\treturn children",
"def get_child(self, name):\n return name, self._children[name]",
"def children(self):\n l = []\n n = self.node.firstChild\n while n:\n l.append(XmlWrapper(n))\n n = n.nextSibling\n return l",
"def children(self):\n l = []\n n = self.node.firstChild\n while n:\n l.append(XmlWrapper(n))\n n = n.nextSibling\n return l",
"def get_child(self, name):\n for n in self.children:\n if n.name == name:\n return n\n\n raise ChildError(\"Can't find child node '{name}'\".format(**locals()))"
] | [
"0.78755844",
"0.76069707",
"0.70648605",
"0.6875181",
"0.686096",
"0.68549895",
"0.6836145",
"0.67805845",
"0.6676841",
"0.6539384",
"0.65334934",
"0.65248793",
"0.6507252",
"0.6486434",
"0.6399938",
"0.6362787",
"0.63362265",
"0.6320614",
"0.6318085",
"0.6318085",
"0.62961006",
"0.6271237",
"0.6250283",
"0.6170669",
"0.61644316",
"0.61319566",
"0.61317253",
"0.6104958",
"0.6104958",
"0.609021"
] | 0.79214627 | 0 |
Returns all child nodes of a specified name and namespaceURI. | def getChildrenByNameNS(rootNode, ns, name):
return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getChildrenByName(rootNode, name):\n return [e for e in rootNode.childNodes if e.localName == name]",
"def getElements(self, name=\"\"):\n\n if not name:\n return self.children\n else:\n elements = []\n for element in self.children:\n if element.name == name:\n elements.append(element)\n return elements",
"def get_child_nodes(node):\r\n return list(iter_child_nodes(node))",
"def get_children(self):\n return NodeList(self._my_map['childNodes'])",
"def get(node: md.Document, name: str) -> mc.Nodelist:\n return node.getElementsByTagName(name)",
"def getAllChildrenWithTagName(elm, tagname):\n if elm.hasChildNodes() == True:\n elms = []\n for c in elm.childNodes:\n if c.nodeType == Node.ELEMENT_NODE and c.tagName == tagname:\n elms.append(c)\n return elms\n else:\n return None",
"def elements(self, uri=None, name=None):\n if name is None:\n return generateOnlyInterface(self.children, IElement)\n else:\n return generateElementsQNamed(self.children, name, uri)",
"def findChildren(self, name):\n\n # Note: this returns a list of all the children of a given\n # name, irrespective of the depth of look-up.\n \n children = []\n \n for child in self.getAllChildren():\n if child.getName() == name:\n children.append(child)\n\n return children",
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child",
"def getChildNodes( self, path ):\n\n return self.db.childNodes( path )",
"def test_iterChildNodesByTagName(self):\n _node = MagicMock()\n _node.childNodes = self._createNodeList([\n (1, 'abba'),\n (2, 'trara'),\n (4, 'child'),\n (3, 'child'),\n (4, 'child')\n ])\n _test_object = Node(_node)\n values = list(_test_object.iterChildNodesByTagName('child'))\n self.assertListEqual(\n values, [_node.childNodes[2], _node.childNodes[4]])",
"def getchildren(self):\n return self.root.getchildren()",
"def getChildElements(doc):\n for child in doc.childNodes:\n if child.nodeType == child.ELEMENT_NODE:\n yield child",
"def s_all_descendants(node):\r\n if len(node.children)==0:\r\n return []\r\n else:\r\n children = node.children[:]\r\n for child in node.children:\r\n children.extend(Node.s_all_descendants(child))\r\n return children",
"def children(self):\n l = []\n n = self.node.firstChild\n while n:\n l.append(XmlWrapper(n))\n n = n.nextSibling\n return l",
"def children(self):\n l = []\n n = self.node.firstChild\n while n:\n l.append(XmlWrapper(n))\n n = n.nextSibling\n return l",
"def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children",
"def get_by_tag(self, name):\n return [XmlWrapper(i) for i in self.node.getElementsByTagName(name)]",
"def get_by_tag(self, name):\n return [XmlWrapper(i) for i in self.node.getElementsByTagName(name)]",
"def getChildElementsByTagName(element: org.w3c.dom.Element, string: str) -> java.util.List:\n ...",
"def get_children_with_name(self, prefix):\n prefix = prefix.lower()\n for childname, child in self.children.items():\n if childname.lower().startswith(prefix):\n yield child",
"def get_children_elements(self):\n\n pass",
"def from_name(self, name: str) -> t.Sequence[etree.Element]:\n return self.child_tags[name]",
"def all_nodes(self, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'all')",
"def children(node):\n\n return snd(node)",
"def get_node_children(self, node):\n return node.children",
"def get_child_znodes(cluster_name, path):\n zoo_client = ZookeeperService.get_zoo_client(cluster_name)\n child_znodes = []\n\n children = zoo_client.get_children(path)\n # iter child nodes and convert to dict with extra info\n for child in children:\n child_path = os.path.join(path, child)\n data, _ = zoo_client.get(child_path)\n # node\n node = {\"path\": child_path, \"value\": data}\n node[\"name\"] = child_path.rsplit('/', 1)[-1]\n child_znodes.append(node)\n return child_znodes",
"def getSingleChildByNameNS(rootNode, ns, name):\n nodeList = [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]\n if len(nodeList) > 0:\n return nodeList[0]\n else:\n return None",
"def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:\n body: Optional[Sequence[ast.AST]] = getattr(node, 'body', None)\n if body is not None:\n for child in body:\n yield child",
"def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]"
] | [
"0.7223935",
"0.6433308",
"0.6407375",
"0.6367358",
"0.63064814",
"0.6166133",
"0.6032213",
"0.59840125",
"0.59068906",
"0.5855462",
"0.5814537",
"0.58058417",
"0.577752",
"0.5765865",
"0.5721846",
"0.5721846",
"0.5707831",
"0.5676966",
"0.5676966",
"0.564972",
"0.5647398",
"0.56357944",
"0.559298",
"0.5553754",
"0.55421966",
"0.5503034",
"0.5498758",
"0.54848766",
"0.5480493",
"0.5463848"
] | 0.77882755 | 0 |
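Editor's usage sketch (not part of the dataset record above): it exercises getChildrenByNameNS with the standard-library xml.dom.minidom parser and an illustrative urn:example:a namespace; the parser choice and the namespace are assumptions made only for this example.

# Hedged usage sketch for getChildrenByNameNS; every name below except the function itself is illustrative.
from xml.dom import minidom

def getChildrenByNameNS(rootNode, ns, name):
    return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]

# minidom performs namespace processing by default, so localName and namespaceURI are populated.
doc = minidom.parseString('<root xmlns:a="urn:example:a"><a:item/><a:item/><other/></root>')
items = getChildrenByNameNS(doc.documentElement, "urn:example:a", "item")
print(len(items))  # 2: only the namespaced <a:item> children match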
Add a new node to the Pipeline | def add_node(self, new_node: 'GraphNode'):
self.operator.add_node(new_node) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_node(self, node):",
"def add_node(self, node):\n self.nodes.append(node)",
"def add_node (self, node):\n raise NotImplementedError",
"def add_node(self, node):\n self.nodes.add(node)",
"def addNode(self, node: Node):\n self.nodes.append(node)",
"def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True",
"def AddNode(self, node):\n self.nodes.append(node)\n return node",
"def register_node(self, node):\n self.nodes.add(node)",
"def addNode (self, node):\n self.__nodes.add(node)",
"def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)",
"def add_node(self,node):\n \n vertex = Vertex(node)\n \n self.nodes[node] = vertex\n self.numNodes += 1",
"def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)",
"def do_add_node(self, line=''):\n self.fibbing.add_node()",
"def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node",
"def add_node(self, node):\n self._nodes.add(node)",
"def add_node(self, node):\n self._nodes.add(node)",
"def add_node(self, node):\n if node not in self.nodes:\n self.nodes.append(node)",
"def add_node(self, node):\n self._execution_pool[node.name] = node",
"def add_node(self, node):\n\n # Add node only if it does not exist yet\n if node.id() in self.__nodes:\n return\n\n labels = node.labels()\n for label in labels:\n break\n\n if label not in self.__labels:\n self.__labels[label] = len(self.__labels)\n\n js = \"nodes.push({index: \" + str(node.id()) + \", \" +\\\n \"name: \\\"\" + str(node.id()) + \"\\\", \" +\\\n \"group: \" + str(self.__labels[label]) + \\\n \" });\"\n\n d3_node_id = self.frame.evaluateJavaScript(js) - 1\n self.__nodes[node.id()] = str(d3_node_id)\n logger.info(\"node id %s - > d3 id: %s\", node.id(), d3_node_id)",
"def add_node(self, node):\n if node in self.nodes:\n return\n\n self.nodes_need_process.add(node)\n self.nodes.add(node)\n self.inputs.discard(node)\n self.inputs.update(\n {\n n\n for n in node.all_input_nodes\n if n.op in CALLABLE_NODE_OPS and n not in self.nodes\n }\n )",
"def add_node(self, **kwargs):\n self._content.append(Node(**kwargs))",
"def addChild(node):",
"def add_node(self, node_data):\n self.__rtags.append(True)\n self.__nodedata.append(data)\n self.__ltags.append(True)",
"def add_node(self, name, node):\n self.nodes.setdefault(name, node)",
"def add_node(self, name, node):\n\n self.nodes[name] = fold_constant(node)",
"def add(self, node, arrow = None):\n## print(node)\n self.graph = addNode(self.graph, node, arrow)",
"def add_node(self, node):\n if node not in self.nodes:\n self._nodes.append(node)",
"def append_node(self, node):\n self.nodes.append(node)\n node.slot = len(self.nodes)",
"def __iadd__(self, node):\r\n\r\n self.stream.add(node)\r\n if self.node:\r\n self.stream.connect(self.node, node)\r\n self.node = node\r\n\r\n return self",
"def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node"
] | [
"0.78037673",
"0.75197977",
"0.74303645",
"0.74014753",
"0.7204696",
"0.7202427",
"0.7178709",
"0.7173464",
"0.715541",
"0.7050441",
"0.7048333",
"0.70343333",
"0.703212",
"0.70309037",
"0.7029448",
"0.7029448",
"0.70204383",
"0.70176816",
"0.7000545",
"0.69609106",
"0.6937558",
"0.6920716",
"0.6909863",
"0.6812969",
"0.67774993",
"0.6777055",
"0.6758636",
"0.67155963",
"0.6700419",
"0.6700401"
] | 0.7537602 | 1 |
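Editor's note (not part of the dataset): a minimal sketch of the delegation pattern in the add_node record above. GraphNode and GraphOperator here are simplified stand-ins invented for illustration; they are not the real classes behind the record.

# A Pipeline that forwards node management to an operator object (simplified stand-ins).
class GraphNode:
    def __init__(self, name):
        self.name = name

class GraphOperator:
    def __init__(self):
        self.nodes = []

    def add_node(self, new_node):
        # A real operator would also wire edges; this sketch only keeps a flat registry.
        self.nodes.append(new_node)

class Pipeline:
    def __init__(self):
        self.operator = GraphOperator()

    def add_node(self, new_node: 'GraphNode'):
        self.operator.add_node(new_node)

pipeline = Pipeline()
pipeline.add_node(GraphNode('scaling'))
print(len(pipeline.operator.nodes))  # 1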
Replace the subtree rooted at the old node with the subtree rooted at the new node | def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):
self.operator.update_subtree(old_subroot, new_subroot) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents",
"def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.right)",
"def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)",
"def _auxRefreshTree(self, tree_index):\n tree_item = self.treeItem(tree_index)\n logger.debug(\"_auxRefreshTree({}): {}{}\".format(\n tree_index, tree_item.obj_path,\n \"*\" if tree_item.children_fetched else \"\"))\n\n if tree_item.children_fetched:\n\n old_items = tree_item.child_items\n new_items = self._fetchObjectChildren(tree_item.obj,\n tree_item.obj_path)\n\n old_item_names = [(item.obj_name,\n item.is_attribute) for item in old_items]\n new_item_names = [(item.obj_name,\n item.is_attribute) for item in new_items]\n seqMatcher = SequenceMatcher(isjunk=None, a=old_item_names,\n b=new_item_names,\n autojunk=False)\n opcodes = seqMatcher.get_opcodes()\n\n logger.debug(\"(reversed) \"\n \"opcodes: {}\".format(list(reversed(opcodes))))\n\n for tag, i1, i2, j1, j2 in reversed(opcodes):\n\n if 1 or tag != 'equal':\n logger.debug(\" {:7s}, a[{}:{}] ({}), b[{}:{}] ({})\"\n .format(tag, i1, i2,\n old_item_names[i1:i2], j1, j2,\n new_item_names[j1:j2]))\n\n if tag == 'equal':\n # Only when node names are equal is _auxRefreshTree\n # called recursively.\n assert i2-i1 == j2-j1, (\"equal sanity \"\n \"check failed \"\n \"{} != {}\".format(i2-i1, j2-j1))\n for old_row, new_row in zip(range(i1, i2), range(j1, j2)):\n old_items[old_row].obj = new_items[new_row].obj\n child_index = self.index(old_row, 0, parent=tree_index)\n self._auxRefreshTree(child_index)\n\n elif tag == 'replace':\n # Explicitly remove the old item and insert the new.\n # The old item may have child nodes which indices must be\n # removed by Qt, otherwise it crashes.\n assert i2-i1 == j2-j1, (\"replace sanity \"\n \"check failed \"\n \"{} != {}\").format(i2-i1, j2-j1)\n\n # row number of first removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" calling \"\n \"beginInsertRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n\n elif tag == 'delete':\n assert j1 == j2, (\"delete\"\n \" sanity check \"\n \"failed. {} != {}\".format(j1, j2))\n # row number of first that will be removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n elif tag == 'insert':\n assert i1 == i2, (\"insert \"\n \"sanity check \"\n \"failed. {} != {}\".format(i1, i2))\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" \"\n \"calling beginInsertRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n else:\n raise ValueError(\"Invalid tag: {}\".format(tag))",
"def fix_from(root, infos):\n # Assume root has correct parents (or none)\n old_hash = root['old_hash']\n new_hash = sha1(info2str(root).encode('latin1')).hexdigest()\n for info in infos:\n if not 'parents' in info:\n continue\n if old_hash in info['parents']:\n index = info['parents'].index(old_hash)\n info['parents'][index] = new_hash\n info['fixed_parents'][index] = True",
"def prune_tree( cls, tree, begin_index, end_index ):\n \n begin_path = tree.leaf_treeposition(begin_index)\n end_path = tree.leaf_treeposition(end_index)\n\n current_node = tree[begin_path[:-1]]\n end_node = tree[end_path[:-1]]\n \n new_tree = ParentedTree('(' + tree.node + ')')\n ## Initialize new tree\n l = []\n current_new = new_tree\n current_old = tree\n for i in xrange(len(begin_path)-1):\n if type(current_old[begin_path[i]]) != str:\n current_new.insert(0, ParentedTree('('+current_old[begin_path[i]].node +')'))\n current_new = current_new[0]\n current_old = current_old[begin_path[i]]\n \n while current_old != end_node:\n if not (type(current_old[0]) == str or type(current_old[0]) == unicode):\n current_old = current_old[0]\n current_new.insert( 0, ParentedTree('('+current_old.node +')'))\n current_new = current_new[0]\n else:\n current_new.insert(0, current_old[0])\n while len(current_old.parent()) == current_old.parent_index() + 1:\n current_old = current_old.parent()\n current_new = current_new.parent()\n\n current_old = current_old.parent()[current_old.parent_index() + 1]\n current_new.parent().insert( current_new.parent_index() + 1,\n ParentedTree('('+current_old.node +')'))\n \n current_new = current_new.parent()[current_new.parent_index() + 1]\n current_new.insert(0, current_old[0])\n# print current_new\n return new_tree",
"def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val",
"def fix_root(self):\n # In the main bzrlib code, this forces the new tree to use the same\n # tree root as the old tree. But merge-into explicitly doesn't want\n # that. So the first portion is just a copy of the old code, and then\n # we change the rest.\n try:\n self.tt.final_kind(self.tt.root)\n except NoSuchFile:\n self.tt.cancel_deletion(self.tt.root)\n if self.tt.final_file_id(self.tt.root) is None:\n self.tt.version_file(self.tt.tree_file_id(self.tt.root),\n self.tt.root)\n # All we do is skip the step which used to sanitize the root id.",
"def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val",
"def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt",
"def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)",
"def regenerate_tree(self, newpos):\n self.path = self.tree[newpos][2]\n self.tree = self.get_tree()\n self.pos = self.get_curpos()",
"def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError",
"def recoverTree(self, root: TreeNode) -> None:\n arr1=[]\n self.toList(root,arr1)\n print (arr1) \n num1=None\n num2=arr1[-1]\n l=len(arr1)\n i=0\n arr2=[]+arr1\n arr2.sort()\n for i in range(l):\n if not arr1[i]==arr2[i]:\n if num1:\n num2=arr1[i]\n break\n num1=arr1[i]\n self.replace(root,num1,num2)\n\n print(root)",
"def reinitialize(self):\n if self.is_leaf():\n self.__hash__(reinit=True)\n return {self}, {self}\n else:\n children_leaves = set()\n children_nodes = {self}\n # iterating over the children\n for child in self.child_nodes:\n cur_child_leaves, cur_child_nodes = self.child_nodes[child].reinitialize()\n children_leaves = children_leaves.union(cur_child_leaves)\n children_nodes = children_nodes.union(cur_child_nodes)\n # storing the sets for later use\n self.__hash__(reinit=True)\n self.leaves = children_leaves\n self.nodes = children_nodes\n return children_leaves, children_nodes",
"def replace_subtree(self, tree, update_tree=True):\n if self.parent is None: # Changing the whole tree\n self.__dict__ = tree.__dict__\n else:\n if self is self.parent.left_subtree:\n self.parent.left_subtree = tree\n else:\n self.parent.right_subtree = tree\n if update_tree:\n self.update_tree()\n return self",
"def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()",
"def recoverTree(self, root: TreeNode) -> None:\n self.tmp, self.left, self.right = None, None, None\n self.helper(root)\n self.left.val, self.right.val = self.right.val, self.left.val",
"def update_tree(tree, subtree_hierarchy):\n new_tree = subtree_hierarchy.copy()\n for bg_pop, row in subtree_hierarchy.iterrows():\n # Remove not showing pops from new_tree\n if row['To_show'] == 'no':\n new_tree = new_tree.drop(bg_pop)\n continue\n\n # Find Parent\n parent_to_show = row['Parent']\n # If bg_pop has no Parent, skip\n if parent_to_show == '':\n continue\n # If Parent not in subtree, skip\n if parent_to_show not in subtree_hierarchy.index:\n continue\n # If Parent has To_show = 'no', find Parent of Parent, etc.\n while subtree_hierarchy.at[parent_to_show, 'To_show'] == 'no':\n parent_to_show = subtree_hierarchy.at[parent_to_show, 'Parent']\n # Set Parent to show in new_tree\n new_tree.at[bg_pop, 'Parent'] = parent_to_show\n\n new_tree = new_tree.reset_index()[['index', 'BG_population', 'Parent', 'BG_label']]\n # For pairs ('BG_population', 'Parent') that has coords, add coords\n new_tree_pos = new_tree.merge(tree.reset_index(), how='left', on=['BG_population', 'Parent'])\n new_tree_pos = new_tree_pos[['index_x', 'BG_population', 'Parent', 'posX', 'posY', 'BG_label_x']] \\\n .rename(columns={'index_x': 'index', 'BG_label_x': 'BG_label'}) \\\n .fillna('')\n\n return new_tree_pos",
"def convertTreeToCoveringTree( self, tree ):\n\n self.debug( \"convertTreeToCoveringTree: tree at start\" )\n if E.getLogLevel() >= 2: self.printTree( tree )\n \n ntree = self.addChildren( tree )\n \n #######\n # descend tree and add new domains\n # if domain has only a single child: delete the child and\n # rewire\n for t in ntree:\n info, children = t\n \n if info:\n node, parent, level, ranges = info\n \n if len(children) == 1:\n ntree[children[0]][0] = None\n ntree[node][1] = ntree[children[0]][1]\n \n #######\n # build new tree with new node identifiers\n current_node = 0\n covering_tree = []\n \n levels = map( lambda x: [], [0] * len(tree))\n \n for t in ntree:\n info, children = t\n \n if not info: continue\n node, parent, level, ranges = info\n \n if len(children) == 2:\n \n # add new node to tree, rename parent in children and\n # set borders\n leftchild = children[0]\n rightchild = children[1] \n \n # change left child\n lnode, lparent, llevel, lranges = ntree[leftchild][0]\n rnode, rparent, rlevel, rranges = ntree[rightchild][0] \n \n if ranges:\n lranges, rranges = self.getCoveringRanges( lranges, rranges, ranges )\n else:\n continue\n \n # change left child\n ntree[leftchild][0]= (None, current_node, level + 1, lranges) \n \n # change right child \n # cnode, cparent, clevel, cranges = ntree[rightchild][0]\n ntree[rightchild][0]= (None, current_node, level + 1, rranges )\n \n covering_tree.append( [level, parent, 0, 0, ranges] )\n levels[level].append( current_node )\n \n current_node += 1\n \n max_range = covering_tree[0][4][0][1]\n \n self.debug( \"convertTreeToCoveringTree: tree before removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n ###################################\n ## remove small fragments\n ## has to be done per level in order to be consistent\n ## done here and not during matrix decomposition, so that\n ## matrix needs not to be permuted more than once.\n for l in range(0, len(levels)):\n if len(levels[l]) == 0: break\n # collect all domains per level in a list of the form\n # (from, to, node)\n ranges = []\n for node in levels[l]:\n ranges += map(lambda x: (x[0], x[1], node), covering_tree[node][4])\n covering_tree[node][4] = []\n \n # and remove small fragments\n new_ranges = self.removeSmallRanges( ranges )\n \n # and put back into tree if there is more than one range\n for (xfrom, xto, node) in new_ranges:\n covering_tree[node][4].append( (xfrom, xto) )\n \n ###################################\n ## delete nodes with empty ranges or only a single child.\n ## renumber nodes so that there are no gaps\n\n self.debug( \"convertTreeToCoveringTree: after removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n return self.collapseTree( covering_tree )",
"def root_nodes(self, node1, node2, distance):\n if node1 == node2.parent:\n upper_node = node1\n lower_node = node2\n upper_dist, lower_dist = distance, lower_node.branch - distance\n elif node2 == node1.parent:\n upper_node = node2\n lower_node = node1\n upper_dist, lower_dist = lower_node.branch - distance, distance\n else:\n raise PhyloValueError('root_nodes() requires that one of the given nodes is the parent of the other.')\n if len(self.root.children) <= 1:\n raise PhyloValueError('cannot re-root a tree where the existing root has one or no children.')\n elif len(self.root.children) == 2:\n if upper_node == self.root:\n # Just need to adjust branch lengths\n root_child = self.root.children[1] if self.root.children[0] == lower_node else self.root.children[0]\n root_child.branch += upper_dist\n lower_node.branch = lower_dist\n else:\n upper_path = self.find_path_to_root(upper_node)\n # Process the old root child after removing the root:\n root_child = self.root.children[1] if self.root.children[0] == upper_path[1] else self.root.children[0]\n root_child.branch += upper_path[1].branch\n root_child.parent = upper_path[1]\n upper_path[1].children.append(root_child)\n # Process nodes between root and upper_node:\n prev_node = upper_path[1]\n for next_node in upper_path[2:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n # Process upper_node, lower_node, and the new root\n upper_node.parent = lower_node.parent = self.root\n upper_node.children.remove(lower_node)\n self.root.children = [node1, node2] # Keeps the argument order\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n else: # If the root has 3 children it means it's an unrooted tree\n new_root = self.new_tree_node()\n new_root.branch = self.root.branch # Transfers any existing root branch\n if upper_node != self.root:\n upper_path = self.find_path_to_root(upper_node)\n prev_node = self.root\n for next_node in upper_path[1:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n upper_node.children.remove(lower_node)\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n new_root.children.append(upper_node)\n new_root.children.append(lower_node)\n upper_node.parent = lower_node.parent = new_root\n self.root = new_root\n self.process_tree_nodes()",
"def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1",
"def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val",
"def clean():\n new_tree = None",
"def swap_nodes(tree) -> None:\n if tree is None:\n raise ValueError('Empty tree')\n tmp = tree.left\n tree.left = tree.right\n tree.right = tmp",
"def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")",
"def merge_nodes(node_ids, tree):\n # Copy the tree so we keep the original intact\n temp = deepcopy(tree)\n # Don't merge nodes if they are already merged\n if node_ids in temp or tuple(reversed(node_ids)) in temp:\n return temp\n # Get all the children of each node we need to merge (except if that child is the other node)\n children_1 = filter(lambda x: x != node_ids[1], temp[node_ids[0]])\n children_2 = filter(lambda x: x != node_ids[0], temp[node_ids[1]])\n merged_children = children_1 + children_2\n # Remove the original nodes in the tree\n temp.pop(node_ids[0], None)\n temp.pop(node_ids[1], None)\n # Add a new node\n temp[node_ids] = merged_children\n \n # Update references to the old node with references to the new node\n for k, v in temp.iteritems():\n if node_ids[0] in v:\n idx = v.index(node_ids[0])\n v.remove(node_ids[0])\n v.insert(idx, node_ids)\n if node_ids[1] in v:\n idx = v.index(node_ids[1])\n v.remove(node_ids[1])\n v.insert(idx, node_ids)\n temp[k] = dedupe(v)\n return temp",
"def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()",
"def untangle_roots(self):\n\n for root in self.all_seed_roots:\n root_queue = [root]\n while root_queue:\n for output_root in self.connect_roots(root_queue.pop(0)):\n root_queue.append(output_root)",
"def _fix_other_tree(self, this_tree, other_tree):\n parent_dir, name = osutils.split(self._target_subdir)\n parent_id = this_tree.path2id(parent_dir)\n\n root_ie = other_tree.inventory.root\n root_ie.parent_id = parent_id\n root_ie.name = name\n\n new_file_id = generate_ids.gen_file_id(name)\n trace.mutter('munging root_ie.file_id: %s => %s', root_ie.file_id,\n new_file_id)\n del other_tree.inventory._byid[root_ie.file_id]\n root_ie.file_id = new_file_id\n other_tree.inventory._byid[new_file_id] = root_ie\n # We need to fake a new id for root_ie\n for child_ie in root_ie.children.itervalues():\n child_ie.parent_id = new_file_id"
] | [
"0.7107235",
"0.65856475",
"0.6365465",
"0.63301456",
"0.625012",
"0.6236693",
"0.61715674",
"0.61310095",
"0.6120327",
"0.60683495",
"0.6065253",
"0.6024953",
"0.6012735",
"0.5961078",
"0.5948224",
"0.5947182",
"0.59322995",
"0.5925321",
"0.5905774",
"0.5905076",
"0.58694637",
"0.58630884",
"0.5862238",
"0.5861653",
"0.58546174",
"0.5838448",
"0.58382607",
"0.5834458",
"0.58314943",
"0.57880497"
] | 0.6714457 | 1 |
Delete the subtree with node as subroot. | def delete_subtree(self, subroot: 'GraphNode'):
self.operator.delete_subtree(subroot) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_root(self, node):\n current = node\n successor = self.find_successor(current) \n temp_height = current.height\n current.height = successor.height\n successor.height = temp_height\n\n if successor != None:\n self.root = successor\n parent = successor.parent\n\n if successor.parent != node:\n if parent.left == successor:\n parent.left = successor.left\n else:\n parent.right = successor.right\n if node.left != successor:\n successor.left = node.left\n else:\n successor.left = None\n if node.right != successor:\n successor.right = node.right \n else:\n successor.right = None\n\n else:\n ancestor = node.left\n ancestor.parent = None\n self.root = ancestor\n del self.nodes[node.key]",
"def Delete(root, node):\n target = root.FindLeaf(node)\n if target == None:\n # print 'no result'\n print(\"no result\")\n return root\n target.leaves.remove(node)\n target.CondenseTree()\n root = root.CondenseRoot()\n return root",
"def delete_subtree(self, node: Node):\n for node_child in self.node_children(node):\n node_child.nodes_from.remove(node)\n for subtree_node in node.ordered_subnodes_hierarchy():\n self._chain.nodes.remove(subtree_node)",
"def delete_one_child(self, node):\n if node.left != None:\n child = node.left\n else:\n child = node.right\n \n parent = node.parent\n if parent.left == node:\n parent.left = child\n else:\n parent.right = child\n child.parent = parent\n del self.nodes[node.key]\n\n self.update_path(parent)",
"def _delete(self, node: TreeNode) -> None:\n if node.height == 1: # node has no children\n if node.parent:\n if node.parent.left == node:\n node.parent.left = None\n else:\n node.parent.right = None\n else:\n self.root = None\n new_node = node.parent\n node = None\n elif node.left == None: # node has only right child\n if node.parent:\n if node.parent.left == node:\n node.parent.left = node.right\n else:\n node.parent.right = node.right\n else:\n self.root = node.right\n node.right.parent = node.parent\n new_node = node.parent\n node = None\n elif node.right == None: # node has only left child\n if node.parent:\n if node.parent.left == node:\n node.parent.left = node.left\n else:\n node.parent.right = node.left\n else:\n self.root = node.left\n node.left.parent = node.parent\n new_node = node.parent\n node = None\n else: # node has 2 children\n next_larger = self.successor(node.val)\n node.val = next_larger.val\n return self._delete(next_larger)\n self._inspect_changes(new_node)",
"def delete_leaves_2(self, root):\n if not (root.left or root.right):\n print(\"delete\")\n print(root.data)\n print(\"---------\")\n root = None\n return\n\n if root.left:\n self.delete_leaves_2(root.left)\n\n if root.right:\n self.delete_leaves_2(root.right)",
"def delete(self, val):\n\n\t\tself.root = self.deleteHelper(self.root, val)\n\t\tself.numNodes = 0\n\t\tif self.root:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif node.left:\n\t\t\t\t\tQ.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tQ.append(node.right)\n\t\t\t\tself.numNodes += 1",
"def delete_node(self, node):\r\n\r\n # if node is loose LEAF, just delete the node and tell its parent its child is gone\r\n if not node.rightchild and not node.leftchild:\r\n if node == node.parent.rightchild:\r\n node.parent.rightchild = None\r\n if node == node.parent.leftchild:\r\n node.parent.leftchild = None\r\n # if node has ONE CHILD, being left: just delete the node and tell its parent is node's left child\r\n if not node.rightchild and node.leftchild:\r\n if node == node.parent.rightchild:\r\n node.parent.rightchild = node.leftchild\r\n if node == node.parent.leftchild:\r\n node.parent.leftchild = node.leftchild\r\n # if node has ONE CHILD, being right: just delete the node and tell its parent is node's right child\r\n if node.rightchild and not node.leftchild:\r\n if node == node.parent.rightchild:\r\n node.parent.rightchild = node.rightchild\r\n if node == node.parent.leftchild:\r\n node.parent.leftchild = node.rightchild\r\n # if node has TWO CHILDREN: swap node with the one containing the inorder successor, then solve the problem from\r\n # there by trying to delete that node (which is a recursive call)\r\n if node.rightchild and node.leftchild:\r\n swapnode = self.get_inorder_successor_from_right_part_tree(node.rightchild)\r\n temp = node.object\r\n node.object = swapnode.object\r\n swapnode.object = temp\r\n self.delete_node(swapnode)",
"def clear_tree(self):\n self.treeview.delete(*self.treeview.get_children())",
"def deleteUtil(self, node, root):\n if node is None:\n return node\n\n node.left = self.deleteUtil(node.left, root)\n node.right = self.deleteUtil(node.right, root)\n\n if node == root:\n if root.left is None:\n temp = root.right\n root = None\n return temp\n\n elif root.right is None:\n temp = root.left\n root = None\n return temp\n\n # Get inorder successor of root\n temp = self.getLeftmost(root.right)\n root.key = temp.key\n\n # Recursively delete inorder successor\n root.right = self.deleteUtil(root.right, temp)\n\n return node",
"def remove_subtree(self, root: State):\n self.remove(root)\n for child in root.children:\n self.remove_subtree(child)",
"def delete(self, tree_path):\n\t\traise NotImplementedError",
"def delete(self):\n self.parent.delete_node(self)",
"def test_remove_childless_on_delete_tree2(delete_tree):\n delete_tree.remove(\"teabaggers\")\n assert delete_tree.contains(\"teabaggers\") is False",
"def delete_node(self, u_node_id):\n node = self.node_memory[u_node_id]\n\n # Delete the formulas from the tree, but keep the formulas in node for restoration later\n copy = list(node.formulas)\n for f in node.formulas:\n self.delete_formula(f)\n node.formulas = copy\n\n # Remove node from parent_formula\n parent_formula = node.parent_formula\n parent_formula.node_children.remove(node)\n\n # Remove the node from parent\n node.parent.children.remove(node)\n\n # Remove the node from the Tree node list\n self.nodes.pop(node.node_id)\n self.readjust_node_id(node.node_id)",
"def remove_subtree(self, update_tree=True):\n self.left_subtree = None\n self.right_subtree = None\n if update_tree:\n self.update_tree()\n return self",
"def delete_node(self, node):\n return node.delete()",
"def test_remove_childless_on_delete_tree(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"teabaggers\")\n assert delete_tree.size == tree_size - 1",
"def delete_tree(self, idx: int = None, id: int = None):\n\n if id is not None:\n idx = self.tree_ids.index(id)\n\n self.nodes.pop(idx)\n self.edges.pop(idx)\n self.names.pop(idx)\n self.colors.pop(idx)\n self.tree_ids.pop(idx)\n self.group_ids.pop(idx)",
"def test_remove_middle_child_on_delete_tree2(delete_tree):\n delete_tree.remove(\"teabag\")\n assert delete_tree.contains(\"teabag\") is False",
"def delete_node(self, node_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n # delete the paths associated with this node\n connection.execute(\n self.paths.delete().where(\n self.paths.c.descendant.in_(\n select(\n [self.paths.c.descendant]\n ).where(\n self.paths.c.ancestor == node_id\n ))\n )\n )\n\n # delete the node\n connection.execute(\n self.nodes.delete().where(\n self.nodes.c.id == node_id\n )\n )",
"def delete(self, key):\n root = self.find(key, True)\n if root is False:\n return False\n\n parent = root.parent\n\n # root deleten\n if self.root == root:\n found = False\n current = root\n if root.left is not None:\n current = root.left\n while not found:\n if current.right is not None:\n current = current.right\n else:\n found = True\n\n if root.left != current:\n current.parent.right = None\n current.left = root.left\n current.left.parent = current\n\n current.parent = None\n if root.right is not None:\n current.right = root.right\n current.right.parent = current\n\n self.__removenode(self.root.key)\n self.root = current\n del root\n self.size -= 1\n\n elif root.right is not None:\n current = root.right\n while not found:\n if current.left is not None:\n current = current.left\n else:\n found = True\n\n if root.right != current:\n current.parent.left = None\n current.right = root.right\n current.right.parent = current\n\n current.parent = None\n if root.left is not None:\n current.left = root.left\n current.left.parent = current\n self.__removenode(self.root.key)\n self.root = current\n del root\n self.size -= 1\n return True\n else:\n self.__removenode(self.root.key)\n self.root = None\n del root\n self.size -= 1\n return True\n\n\n\n\n # leaf zonder kinderen\n if root.left is None and root.right is None:\n if root == parent.left:\n parent.left = None\n self.__removenode(self.root.key)\n del root\n self.size -= 1\n return True\n else:\n parent.right = None\n self.__removenode(self.root.key)\n del root\n self.size -= 1\n return True\n\n # één kind\n if root.left is None:\n child = root.right\n parent.right = child\n child.parent = parent\n self.__removenode(self.root.key)\n self.size -= 1\n del root\n return True\n\n if root.right is None:\n child = root.left\n parent.left = child\n child.parent = parent\n self.__removenode(self.root.key)\n del root\n self.size -= 1\n return True\n\n # twee kinderen (inorder succesor)\n found = False\n current = root\n if root.left is not None:\n current = root.left\n while not found:\n if current.right is not None:\n current = current.right\n else:\n found = True\n\n if parent.right == root:\n parent.right = current\n if current == root.left:\n current.left = None\n else:\n current.parent.right = None\n current.left = root.left\n root.left.parent = current\n current.parent = parent\n\n if root.right is not None:\n root.right.parent = current\n current.right = root.right\n self.__removenode(self.root.key)\n del root\n self.size -= 1\n return True\n else:\n parent.left = current\n if current == root.left:\n current.left = None\n else:\n current.left = root.left\n root.left.parent = current\n current.parent = parent\n current.right = root.right\n if root.right is not None:\n root.right.parent = current\n self.__removenode(self.root.key)\n\n del root\n self.size -= 1\n return True",
"def tree_delete_node(table, row_id):\n c = conn.cursor()\n c.execute(\"\"\"SELECT parent_id, `left`, `right`, tree_id, `level` FROM %s WHERE id=%s\"\"\" % (table, row_id))\n row = c.fetchone()\n c.execute(\"\"\"DELETE FROM %s WHERE id=%s\"\"\" % (table, row_id))\n c.execute(\"\"\"UPDATE %s SET `left`=`left`-2 WHERE `left`>%s AND tree_id=%s\"\"\" % (table, row[\"right\"], row[\"tree_id\"]))\n c.execute(\"\"\"UPDATE %s SET `right`=`right`-2 WHERE `right`>%s AND tree_id=%s\"\"\" % (table, row[\"right\"], row[\"tree_id\"]))\n return True",
"def delete(root: Node, key: int):\n if root is None:\n return None\n if key < root.key:\n root.left = delete(root.left, key)\n elif key > root.key:\n root.right = delete(root.right, key)\n else:\n q = root.left\n r = root.right\n if not r:\n return q\n root_min = findmin(r)\n root_min.right = remove_min(r)\n root_min.left = q\n return balance_tree(root_min)\n return balance_tree(root)",
"def delete(self, k):\n node = self.find(k)\n if node is None:\n return None\n if node is self.root:\n pseudoroot = self.klass(None, 0)\n pseudoroot.left = self.root\n self.root.parent = pseudoroot\n deleted = self.root.delete()\n self.root = pseudoroot.left\n if self.root is not None:\n self.root.parent = None\n return deleted\n else:\n return node.delete()",
"def delete(self, k):\n node = self.find(k)\n if node is None:\n return None\n if node is self.root:\n pseudoroot = self.klass(None, 0)\n pseudoroot.left = self.root\n self.root.parent = pseudoroot\n deleted = self.root.delete()\n self.root = pseudoroot.left\n if self.root is not None:\n self.root.parent = None\n return deleted\n else:\n return node.delete()",
"def test_remove_middle_child_on_delete_tree(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"teabag\")\n assert delete_tree.size == tree_size - 1",
"def delete_tree():\n from trie import Trie\n t = Trie()\n t.insert(\"ted\")\n t.insert(\"tea\")\n t.insert(\"teabag\")\n t.insert(\"teabags\")\n t.insert(\"teabagger\")\n t.insert(\"teabaggers\")\n t.insert(\"teabagged\")\n return t",
"def delete(self, key):\n if self.root is None:\n raise KeyError(\n 'Cannot delete keyue {} from an empty tree'.format(key))\n self.root = self.root.delete(key)",
"def test_remove_top_but_not_root2(delete_tree):\n delete_tree.remove(\"tea\")\n assert delete_tree.contains(\"teabag\") is True"
] | [
"0.75925773",
"0.7480435",
"0.7424216",
"0.71247655",
"0.70918965",
"0.70537895",
"0.6969854",
"0.6895656",
"0.68567795",
"0.682668",
"0.67609817",
"0.66893965",
"0.66813046",
"0.6672785",
"0.667115",
"0.66323125",
"0.6606388",
"0.6594874",
"0.6567626",
"0.6545911",
"0.6524752",
"0.65199566",
"0.65127534",
"0.64850545",
"0.64841896",
"0.64841896",
"0.6483735",
"0.64466983",
"0.64439607",
"0.6443544"
] | 0.8428926 | 0 |
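Editor's note (not part of the dataset): a minimal sketch of the subtree operations behind the two records above (update_subtree and delete_subtree). The parent/children wiring is an assumption made for illustration and is not taken from the records.

# Replace or detach a subtree in a small parent-linked tree (illustrative stand-ins only).
class GraphNode:
    def __init__(self, name, children=None):
        self.name = name
        self.parent = None
        self.children = children or []
        for child in self.children:
            child.parent = self

def update_subtree(old_subroot, new_subroot):
    # Rewire the parent's child slot from the old subroot to the new one.
    parent = old_subroot.parent
    if parent is not None:
        parent.children[parent.children.index(old_subroot)] = new_subroot
    new_subroot.parent = parent

def delete_subtree(subroot):
    # Detaching the subroot removes its whole subtree from the graph.
    parent = subroot.parent
    if parent is not None:
        parent.children.remove(subroot)
    subroot.parent = None

root = GraphNode('root', [GraphNode('a'), GraphNode('b')])
update_subtree(root.children[0], GraphNode('c', [GraphNode('d')]))
delete_subtree(root.children[1])
print([n.name for n in root.children])  # ['c']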
Compute the weekly exposure of the HS300 and ZZ500 indices to the style factors | def factor_exposure(self):
exp_hs_all = pd.DataFrame([])
exp_zz_all = pd.DataFrame([])
for i in range(len(self.weekly_date)):
date = self.weekly_date.iloc[i,0]
factor = get_barra_factor_from_sql(date)
factor['secID'] = factor.index.tolist()
stocklist = factor.index.tolist()
hs300 = get_index_composition(date,'000300.SH')
zz500 = get_index_composition(date,'000905.SH')
hs300['secID'] = hs300.index.tolist()
zz500['secID'] = zz500.index.tolist()
stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))
stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))
stocklist_hs300.sort()
stocklist_zz500.sort()
factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')
factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')
hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')
zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')
del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']
exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))
exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))
exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)
exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0)
print(i)
exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\
'BP','Leverage','Liquidity']
exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\
'BP','Leverage','Liquidity']
exp_hs_all.index = self.weekly_date.iloc[:,0]
exp_zz_all.index = self.weekly_date.iloc[:,0]
return exp_hs_all,exp_zz_all | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exposure():\n def r(x):\n return x/6e4\n\n def w(x):\n return int(x*6e4)\n return r, w",
"def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n return (a + (1 - a) * b) * c * d",
"def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. / 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)",
"def year_scheme_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_SCHEME_OUTPUT[y]",
"def hl_wide(df):\n width = (df['high'] - df['low']) / df['open']\n\n return width.tail(5).mean(), width.tail(20).mean()",
"def soil_h2o_detail(vswc_fpath, mukey, layer_lim, cwscoef11, cwscoef12, start_year, end_year):\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n\n # define soils.in layer depth structure and layer water holding capacities\n depths = [2, 3, 5, 10, 10, 15, 15, 15, 15, 15, 15, 30, 30, 30]\n fcs, wps = fcs_wps(mukey, layers=layer_lim)\n\n # import file contents\n vswc_file = open(vswc_fpath, 'rU')\n contents = []\n for layer in vswc_file:\n entries = layer.split()\n for i in range(len(entries)):\n entries[i] = float(entries[i])\n\n # convert the year from float to int, and note the day\n year = int(math.floor(entries[0]))\n day = int(entries[1])\n\n # sum depth-weighted moisture content for layers within limit\n water_vol = 0\n for j in range(len(entries)):\n if j+1 <= layer_lim:\n water_vol += depths[j] * entries[j+2]\n\n # determine layer with maximum relative water content, and associated value\n rwcs = [] # relative water content\n for j in range(layer_lim):\n vwc = entries[j+2]\n rwc = (vwc-wps[j]) / (fcs[j]-wps[j])\n rwcs.append(rwc)\n max_rwc = max(rwcs)\n\n contents.append([year, day, water_vol, max_rwc])\n\n # # identify wettest and driest year w/in permissible range\n # annual_totals = [0]\n # years = []\n # latest_year = 200000\n # j = 0\n # for i in range(len(contents)):\n # year = int(math.floor(contents[i][0]))\n # if year <= latest_year:\n # annual_totals[j] += contents[i][2]\n # else:\n # annual_totals.append(0)\n # years.append(contents[i][0])\n # j += 1\n # latest_year = year\n # year2 = years[0]\n # years.insert(0, year2-1)\n # perm_years = []\n # perm_annual_totals = []\n # for year in years:\n # if start_year <= year <= end_year:\n # perm_years.append(year)\n # perm_annual_totals.append(annual_totals[years.index(year)])\n # wet_year = years[annual_totals.index(max(annual_totals))]\n # dry_year = years[annual_totals.index(min(annual_totals))]\n wet_year = start_year\n dry_year = end_year\n\n # extract wettest and driest year series within possible range\n wet_svwc = []\n dry_svwc = []\n wet_days = []\n dry_days = []\n wet_rwcs = []\n dry_rwcs = []\n for i in range(len(contents)):\n year = int(math.floor(contents[i][0]))\n if year == wet_year:\n wet_days.append(contents[i][1])\n wet_svwc.append(contents[i][2])\n wet_rwcs.append(contents[i][3])\n if year == dry_year:\n dry_days.append(contents[i][1])\n dry_svwc.append(contents[i][2])\n dry_rwcs.append(contents[i][3])\n\n # plot\n work_path = os.path.dirname(vswc_fpath)\n scenario = vswc_fpath.split('/')[-1][:-4]\n png_fpath = work_path+'/%s.png' % scenario\n f, axes = plt.subplots(2, 2, sharex=True)\n axes[0][0].plot(wet_days, wet_svwc, zorder=2)\n axes[0][0].set_xlim(100, 300)\n axes[0][0].set_ylim(0, 30)\n axes[0][0].set_title('%i' % wet_year)\n axes[0][0].set_ylabel('Root zone\\nwater content (cm)')\n axes[0][1].plot(dry_days, dry_svwc, zorder=2)\n axes[0][1].set_xlim(100, 300)\n axes[0][1].set_ylim(0, 30)\n axes[0][1].set_title('%i' % dry_year)\n\n axes[1][0].plot(wet_days, wet_rwcs, zorder=2)\n axes[1][0].fill_between(wet_days, 0, wet_rwcs, color='c', zorder=0)\n axes[1][0].set_ylabel('Maximum layer\\nrelative water content')\n axes[1][1].plot(dry_days, dry_rwcs, zorder=2)\n axes[1][1].fill_between(dry_days, 0, dry_rwcs, color='c', zorder=0)\n axes[1][0].set_ylim(0, 1)\n axes[1][1].set_ylim(0, 1)\n\n # add limits & save\n whc, min_h2o, max_h2o = whc_tot(mukey, layers=layer_lim)\n production_levels = [0.25, 0.5, 0.75]\n colors = ['red', 'orange', 'green']\n for i in 
range(len(production_levels)):\n rwcf = cwscoef11 - (math.log((1-production_levels[i])/production_levels[i]) / cwscoef12)\n axes[1][0].axhline(y=rwcf, color=colors[i], zorder=1)\n axes[1][1].axhline(y=rwcf, color=colors[i], zorder=1)\n axes[1][1].plot(200, 0, ms='none', color=colors[i], label='%s NPP' % str(production_levels[i]))\n axes[1][1].legend(loc='upper right', prop={'size': 10})\n\n axes[0][0].axhline(y=min_h2o, color='red', zorder=1)\n axes[0][0].axhline(y=max_h2o, color='green', zorder=1)\n axes[0][1].axhline(y=min_h2o, color='red', zorder=1)\n axes[0][1].axhline(y=max_h2o, color='green', zorder=1)\n axes[0][1].plot(200, 0, ms='none', color='green', label='field capacity')\n axes[0][1].plot(200, 0, ms='none', color='red', label='wilting point')\n axes[0][1].legend(loc='upper right', prop={'size': 10})\n\n f.text(0.5, 0.02, 'Day of year', ha='center')\n plt.savefig(png_fpath)\n plt.close()",
"def calc_calories(gpx_track, wt = 175, activity='Run'):",
"def whc_tot(mukey, layers=''):\n #read appropriate soils.in content to a python list\n mukey = str(mukey)\n soil_path = \"/data/paustian/ernie/SSURGO_master_script/soil_test2/\"\n soil_fpath = soil_path+mukey[:-3]+\"/\"+mukey+\".in\"\n cont = [[]]\n data_input = open(soil_fpath, 'r')\n for line in data_input:\n cont.append(line.split())\n del cont[0]\n\n #convert all entries in the 2D list to float format where possible, or zero in the case\n #of very small numbers recorded in scientific notation\n for k in range(len(cont)):\n for l in range(len(cont[k])):\n cont[k][l] = float(cont[k][l])\n\n #loop through list and compute the water holding capacity increment represented in \n #each line\n min_h2o_evap = 0\n min_h2o = 0\n max_h2o = 0\n whc = 0\n for i in range(len(cont)):\n if not layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n else:\n if 1+i <= layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n if layers:\n if layers > len(cont):\n print \"NOTE: specified layer limit exceeds number of layers found in soils.in file\"\n\n return whc, min_h2o, max_h2o",
"def fluxes_to_heating_rate(example_dict):\n\n down_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_NAME\n )\n up_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_NAME\n )\n pressure_matrix_pascals = get_field_from_dict(\n example_dict=example_dict, field_name=PRESSURE_NAME\n ) + 0.\n\n dummy_pressure_matrix_pascals = (\n pressure_matrix_pascals[:, [-1]] +\n (pressure_matrix_pascals[:, [-1]] - pressure_matrix_pascals[:, [-2]])\n )\n pressure_matrix_pascals = numpy.concatenate(\n (pressure_matrix_pascals, dummy_pressure_matrix_pascals), axis=1\n )\n\n net_flux_matrix_w_m02 = down_flux_matrix_w_m02 - up_flux_matrix_w_m02\n dummy_net_flux_matrix_w_m02 = (\n net_flux_matrix_w_m02[:, [-1]] +\n (net_flux_matrix_w_m02[:, [-1]] - net_flux_matrix_w_m02[:, [-2]])\n )\n net_flux_matrix_w_m02 = numpy.concatenate(\n (net_flux_matrix_w_m02, dummy_net_flux_matrix_w_m02), axis=1\n )\n\n coefficient = GRAVITY_CONSTANT_M_S02 / DRY_AIR_SPECIFIC_HEAT_J_KG01_K01\n\n # heating_rate_matrix_k_day01 = DAYS_TO_SECONDS * coefficient * (\n # numpy.gradient(net_flux_matrix_w_m02, axis=1) /\n # numpy.absolute(numpy.gradient(pressure_matrix_pascals, axis=1))\n # )\n\n heating_rate_matrix_k_day01 = DAYS_TO_SECONDS * coefficient * (\n numpy.diff(net_flux_matrix_w_m02, axis=1) /\n numpy.absolute(numpy.diff(pressure_matrix_pascals, axis=1))\n )\n\n error_checking.assert_is_numpy_array_without_nan(net_flux_matrix_w_m02)\n error_checking.assert_is_numpy_array_without_nan(pressure_matrix_pascals)\n heating_rate_matrix_k_day01[numpy.isnan(heating_rate_matrix_k_day01)] = 0.\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_heating_rate = SHORTWAVE_HEATING_RATE_NAME in vector_target_names\n if not found_heating_rate:\n vector_target_names.append(SHORTWAVE_HEATING_RATE_NAME)\n\n heating_rate_index = vector_target_names.index(SHORTWAVE_HEATING_RATE_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_heating_rate:\n example_dict[VECTOR_TARGET_VALS_KEY][..., heating_rate_index] = (\n heating_rate_matrix_k_day01\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=heating_rate_index, values=heating_rate_matrix_k_day01, axis=-1\n )\n\n return example_dict",
"def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)",
"def year_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_DEMAND[y]",
"def exp_weight(season_rate, season_count_fraction):\n return np.exp(- season_rate * season_count_fraction)",
"def year_cost_rule(_m, y):\r\n\r\n return sum(m.RHO[y, s] * m.SCEN[y, s] for s in m.S)",
"def sd_to_XYZ_tristimulus_weighting_factors_ASTME308(\n sd,\n cmfs=STANDARD_OBSERVERS_CMFS['CIE 1931 2 Degree Standard Observer']\n .copy().trim(ASTME308_PRACTISE_SHAPE),\n illuminant=sd_ones(ASTME308_PRACTISE_SHAPE),\n k=None):\n\n if illuminant.shape != cmfs.shape:\n runtime_warning(\n 'Aligning \"{0}\" illuminant shape to \"{1}\" colour matching '\n 'functions shape.'.format(illuminant.name, cmfs.name))\n illuminant = illuminant.copy().align(cmfs.shape)\n\n if sd.shape.boundaries != cmfs.shape.boundaries:\n runtime_warning('Trimming \"{0}\" spectral distribution shape to \"{1}\" '\n 'colour matching functions shape.'.format(\n illuminant.name, cmfs.name))\n sd = sd.copy().trim(cmfs.shape)\n\n W = tristimulus_weighting_factors_ASTME2022(\n cmfs, illuminant,\n SpectralShape(cmfs.shape.start, cmfs.shape.end, sd.shape.interval), k)\n start_w = cmfs.shape.start\n end_w = cmfs.shape.start + sd.shape.interval * (W.shape[0] - 1)\n W = adjust_tristimulus_weighting_factors_ASTME308(\n W, SpectralShape(start_w, end_w, sd.shape.interval), sd.shape)\n R = sd.values\n\n XYZ = np.sum(W * R[..., np.newaxis], axis=0)\n\n return from_range_100(XYZ)",
"def setup_percentiles_pediatrics_new():\n df_cdc = pd.read_csv(Path(\"growthviz-data/ext/growthfile_cdc_ext.csv.gz\"))\n df_who = pd.read_csv(Path(\"growthviz-data/ext/growthfile_who.csv.gz\"))\n df = df_cdc.merge(df_who, on=[\"agedays\", \"sex\"], how=\"left\")\n\n # Add weighting columns to support smoothing between 2-4yo\n df = df.assign(ageyears=lambda r: (r[\"agedays\"] / 365.25))\n df[\"cdcweight\"] = 0\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"left\"), \"cdcweight\"] = (\n df[\"ageyears\"] - 2\n )\n df[\"whoweight\"] = 0\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"left\"), \"whoweight\"] = (\n 4 - df[\"ageyears\"]\n )\n\n PERCENTILES = [0.03, 0.05, 0.10, 0.25, 0.50, 0.75, 0.85, 0.90, 0.95, 0.97]\n\n # Compute percentiles for the full set of vars\n for s in [\"who\", \"cdc\"]:\n pvars = [\"ht\", \"wt\"]\n if s == \"cdc\":\n pvars.append(\"bmi\")\n for p in pvars:\n for pct in PERCENTILES:\n lvar = f\"{s}_{p}_l\"\n mvar = f\"{s}_{p}_m\"\n svar = f\"{s}_{p}_s\"\n tvar = f\"{s}_{p}_p{int(pct * 100)}\"\n df.loc[df[lvar] == 0, tvar] = df[mvar] * (df[svar] ** norm.ppf(pct))\n df.loc[df[lvar] != 0, tvar] = df[mvar] * (\n 1 + (df[lvar] * df[svar] * norm.ppf(pct))\n ) ** (1 / df[lvar])\n\n # Add smoothed percentiles\n for p in [\"ht\", \"wt\"]:\n for pct in PERCENTILES:\n cdc_var = f\"cdc_{p}_p{int(pct * 100)}\"\n who_var = f\"who_{p}_p{int(pct * 100)}\"\n s_var = f\"s_{p}_p{int(pct * 100)}\"\n df.loc[df[\"ageyears\"] <= 2, s_var] = df[who_var]\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"neither\"), s_var] = (\n (df[who_var] * df[\"whoweight\"]) + (df[cdc_var] * df[\"cdcweight\"])\n ) / 2\n df.loc[df[\"ageyears\"] >= 4, s_var] = df[cdc_var]\n\n return df",
"def get_shocks(self):\r\n \r\n \r\n '''\r\n \r\n if self.jacW == True:\r\n \r\n if self.t_sim == self.s:\r\n \r\n self.wage = .833333 + self.dx\r\n \r\n print(\"made it here\")\r\n \r\n else:\r\n \r\n self.wage = .833333\r\n \r\n \r\n PermShkDstn_U = Lognormal(np.log(self.mu_u) - (self.L*(self.PermShkStd[0])**2)/2 , self.L*self.PermShkStd[0] , 123).approx(self.PermShkCount) #Permanent Shock Distribution faced when unemployed\r\n PermShkDstn_E = MeanOneLogNormal( self.PermShkStd[0] , 123).approx(self.PermShkCount) #Permanent Shock Distribution faced when employed\r\n \r\n TranShkDstn_E = MeanOneLogNormal( self.TranShkStd[0],123).approx(self.TranShkCount)#Transitory Shock Distribution faced when employed\r\n TranShkDstn_E.X = (TranShkDstn_E.X *(1-self.tax_rate)*self.wage*self.N)/(1-self.UnempPrb)**2 #add wage, tax rate and labor supply\r\n \r\n lng = len(TranShkDstn_E.X )\r\n TranShkDstn_U = DiscreteDistribution(np.ones(lng)/lng, self.IncUnemp*np.ones(lng)) #Transitory Shock Distribution faced when unemployed\r\n \r\n IncShkDstn_E = combine_indep_dstns(PermShkDstn_E, TranShkDstn_E) # Income Distribution faced when Employed\r\n IncShkDstn_U = combine_indep_dstns(PermShkDstn_U,TranShkDstn_U) # Income Distribution faced when Unemployed\r\n \r\n #Combine Outcomes of both distributions\r\n X_0 = np.concatenate((IncShkDstn_E.X[0],IncShkDstn_U.X[0]))\r\n X_1=np.concatenate((IncShkDstn_E.X[1],IncShkDstn_U.X[1]))\r\n X_I = [X_0,X_1] #discrete distribution takes in a list of arrays\r\n \r\n #Combine pmf Arrays\r\n pmf_I = np.concatenate(((1-self.UnempPrb)*IncShkDstn_E.pmf, self.UnempPrb*IncShkDstn_U.pmf))\r\n \r\n IncShkDstn = [DiscreteDistribution(pmf_I, X_I)]\r\n \r\n self.IncShkDstn = IncShkDstn\r\n \r\n \r\n '''\r\n \r\n PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays\r\n TranShkNow = np.zeros(self.AgentCount)\r\n newborn = self.t_age == 0\r\n for t in range(self.T_cycle):\r\n these = t == self.t_cycle\r\n N = np.sum(these)\r\n if N > 0:\r\n IncShkDstnNow = self.IncShkDstn[\r\n t - 1\r\n ] # set current income distribution\r\n PermGroFacNow = self.PermGroFac[t - 1] # and permanent growth factor\r\n # Get random draws of income shocks from the discrete distribution\r\n IncShks = IncShkDstnNow.draw(N)\r\n\r\n PermShkNow[these] = (\r\n IncShks[0, :] * PermGroFacNow\r\n ) # permanent \"shock\" includes expected growth\r\n TranShkNow[these] = IncShks[1, :]\r\n \r\n # That procedure used the *last* period in the sequence for newborns, but that's not right\r\n # Redraw shocks for newborns, using the *first* period in the sequence. Approximation.\r\n N = np.sum(newborn)\r\n if N > 0:\r\n these = newborn\r\n IncShkDstnNow = self.IncShkDstn[0] # set current income distribution\r\n PermGroFacNow = self.PermGroFac[0] # and permanent growth factor\r\n\r\n # Get random draws of income shocks from the discrete distribution\r\n EventDraws = IncShkDstnNow.draw_events(N)\r\n PermShkNow[these] = (\r\n IncShkDstnNow.X[0][EventDraws] * PermGroFacNow\r\n ) # permanent \"shock\" includes expected growth\r\n TranShkNow[these] = IncShkDstnNow.X[1][EventDraws]\r\n # PermShkNow[newborn] = 1.0\r\n TranShkNow[newborn] = 1.0\r\n\r\n # Store the shocks in self\r\n self.EmpNow = np.ones(self.AgentCount, dtype=bool)\r\n self.EmpNow[TranShkNow == self.IncUnemp] = False\r\n self.shocks['PermShk'] = PermShkNow\r\n self.shocks['TranShk'] = TranShkNow",
"def exp_impact_mortality(impact, exp_iimp, exposures, key, hazard, imp_fun, insure_flag, kanton):\r\n if not exp_iimp.size:\r\n return \r\n \r\n if kanton is None:\r\n kanton_name = 'CH'\r\n else:\r\n kanton_name = kanton\r\n \r\n directory = '../../input_data/impact_functions/'\r\n \r\n annual_deaths = pd.read_excel(''.join([directory, 'annual_deaths.xlsx']), sheet_name = key)\r\n # file containing the number of annual deaths per CH / Canton for each age category\r\n \r\n # PREPROCESSING STEP:\r\n \r\n # get assigned centroids\r\n icens = exposures[INDICATOR_CENTR + hazard.tag.haz_type].values[exp_iimp]\r\n # get affected intensities\r\n temperature_matrix = hazard.intensity[:, icens] # intensity of the hazard\r\n # get affected fractions\r\n fract = hazard.fraction[:, icens] # frequency of the hazard\r\n # get exposure values\r\n exposure_values = exposures.value.values[exp_iimp] \r\n\r\n expected_deaths = {}\r\n daily_deaths = annual_deaths[annual_deaths['Canton'] == kanton_name]['Annual_deaths'].values[0] / 365\r\n max_temp = temperature_matrix.max()\r\n for value in range(22, int(np.ceil(max_temp)) + 1):\r\n expected_deaths[value] = daily_deaths / imp_fun.calc_mdr(value)\r\n #print(expected_deaths)\r\n\r\n # Compute impact matrix\r\n matrix = impact_mortality(temperature_matrix, exposure_values, icens, expected_deaths, imp_fun, fract.shape)\r\n\r\n if insure_flag and matrix.nonzero()[0].size:\r\n inten_val = hazard.intensity[:, icens].todense()\r\n paa = np.interp(inten_val, imp_fun.intensity, imp_fun.paa)\r\n matrix = np.minimum(np.maximum(matrix - \\\r\n exposures.deductible.values[exp_iimp] * paa, 0), \\\r\n exposures.cover.values[exp_iimp])\r\n impact.eai_exp[exp_iimp] += np.sum(np.asarray(matrix) * \\\r\n hazard.frequency.reshape(-1, 1), axis=0)\r\n else:\r\n impact.eai_exp[exp_iimp] += np.squeeze(np.asarray(np.sum( \\\r\n matrix.multiply(hazard.frequency.reshape(-1, 1)), axis=0)))\r\n\r\n impact.at_event += np.squeeze(np.asarray(np.sum(matrix, axis=1)))\r\n impact.tot_value += np.sum(exposures.value.values[exp_iimp])\r\n if not isinstance(impact.imp_mat, list):\r\n impact.imp_mat[:, exp_iimp] = matrix",
"def _fluxes_to_heating_rate(down_fluxes_w_m02, up_fluxes_w_m02, pressures_pa):\n\n target_matrix = numpy.vstack((down_fluxes_w_m02, up_fluxes_w_m02))\n target_matrix = numpy.expand_dims(numpy.transpose(target_matrix), axis=0)\n predictor_matrix = numpy.expand_dims(pressures_pa, axis=-1)\n predictor_matrix = numpy.expand_dims(predictor_matrix, axis=0)\n\n dummy_example_dict = {\n example_utils.VECTOR_TARGET_NAMES_KEY: [\n example_utils.SHORTWAVE_DOWN_FLUX_NAME,\n example_utils.SHORTWAVE_UP_FLUX_NAME\n ],\n example_utils.VECTOR_TARGET_VALS_KEY: target_matrix,\n example_utils.VECTOR_PREDICTOR_NAMES_KEY: [example_utils.PRESSURE_NAME],\n example_utils.VECTOR_PREDICTOR_VALS_KEY: predictor_matrix\n }\n dummy_example_dict = example_utils.fluxes_to_heating_rate(\n dummy_example_dict\n )\n\n return example_utils.get_field_from_dict(\n example_dict=dummy_example_dict,\n field_name=example_utils.SHORTWAVE_HEATING_RATE_NAME\n )[0, :]",
"def tristimulus_weighting_factors_ASTME2022(cmfs, illuminant, shape, k=None):\n\n if cmfs.shape.interval != 1:\n raise ValueError('\"{0}\" shape \"interval\" must be 1!'.format(cmfs))\n\n if illuminant.shape.interval != 1:\n raise ValueError(\n '\"{0}\" shape \"interval\" must be 1!'.format(illuminant))\n\n global _TRISTIMULUS_WEIGHTING_FACTORS_CACHE\n if _TRISTIMULUS_WEIGHTING_FACTORS_CACHE is None:\n _TRISTIMULUS_WEIGHTING_FACTORS_CACHE = CaseInsensitiveMapping()\n\n name_twf = ', '.join((cmfs.name, illuminant.name, str(shape), str(k)))\n if name_twf in _TRISTIMULUS_WEIGHTING_FACTORS_CACHE:\n return _TRISTIMULUS_WEIGHTING_FACTORS_CACHE[name_twf]\n\n Y = cmfs.values\n S = illuminant.values\n\n interval_i = DEFAULT_INT_DTYPE(shape.interval)\n W = S[::interval_i, np.newaxis] * Y[::interval_i, :]\n\n # First and last measurement intervals *Lagrange Coefficients*.\n c_c = lagrange_coefficients_ASTME2022(interval_i, 'boundary')\n # Intermediate measurement intervals *Lagrange Coefficients*.\n c_b = lagrange_coefficients_ASTME2022(interval_i, 'inner')\n\n # Total wavelengths count.\n w_c = len(Y)\n # Measurement interval interpolated values count.\n r_c = c_b.shape[0]\n # Last interval first interpolated wavelength.\n w_lif = w_c - (w_c - 1) % interval_i - 1 - r_c\n\n # Intervals count.\n i_c = W.shape[0]\n i_cm = i_c - 1\n\n # \"k\" is used as index in the nested loop.\n k_n = k\n\n for i in range(3):\n # First interval.\n for j in range(r_c):\n for k in range(3):\n W[k, i] = W[k, i] + c_c[j, k] * S[j + 1] * Y[j + 1, i]\n\n # Last interval.\n for j in range(r_c):\n for k in range(i_cm, i_cm - 3, -1):\n W[k, i] = (W[k, i] + c_c[r_c - j - 1, i_cm - k] * S[j + w_lif]\n * Y[j + w_lif, i])\n\n # Intermediate intervals.\n for j in range(i_c - 3):\n for k in range(r_c):\n w_i = (r_c + 1) * (j + 1) + 1 + k\n W[j, i] = W[j, i] + c_b[k, 0] * S[w_i] * Y[w_i, i]\n W[j + 1, i] = W[j + 1, i] + c_b[k, 1] * S[w_i] * Y[w_i, i]\n W[j + 2, i] = W[j + 2, i] + c_b[k, 2] * S[w_i] * Y[w_i, i]\n W[j + 3, i] = W[j + 3, i] + c_b[k, 3] * S[w_i] * Y[w_i, i]\n\n # Extrapolation of potential incomplete interval.\n for j in range(\n DEFAULT_INT_DTYPE(w_c - ((w_c - 1) % interval_i)), w_c, 1):\n W[i_cm, i] = W[i_cm, i] + S[j] * Y[j, i]\n\n W *= 100 / np.sum(W, axis=0)[1] if k_n is None else k_n\n\n _TRISTIMULUS_WEIGHTING_FACTORS_CACHE[name_twf] = W\n\n return W",
"def calories_extract(og, fg):\n\n return 13.5 * fg * real_extract(og, fg)",
"def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d",
"def compute_stability_fm_h(H, t0, u_attr, r_air, hc, d0, z0m, cp=1004.16):\n L_ob = H.expression(\n '-(r_air * cp * t0 * (u_attr ** 3.0) / 0.41 / 9.806 / H)',\n {'cp': cp, 'H': H, 'r_air': r_air, 't0': t0, 'u_attr': u_attr})\n L_ob = L_ob.where(L_ob.gte(0), -99.0)\n mm_h = H \\\n .expression(\n '((1 - (16.0 * (hc - d0) / L_ob)) ** 0.25)',\n {'d0': d0, 'hc': hc, 'L_ob': L_ob}) \\\n .where(L_ob.eq(-99.0), 0.0)\n fm_h = H \\\n .expression(\n '2.0 * log((1.0 + mm_h) / 2.0) + log((1.0 + (mm_h ** 2)) / 2.0) - '\n '2.0 * atan(mm_h) + (pi / 2)',\n {'mm_h': mm_h, 'pi': math.pi}) \\\n .where(L_ob.lte(-100).Or(L_ob.gte(100)), 0)\n\n # CGM - Swapped order of calc since d0 is an image compute from hc and\n # z_u is being set as a constant number (for now).\n fm_h = fm_h.where(fm_h.eq(hc.subtract(d0).divide(z0m).log()), fm_h.add(1.0))\n # fm_h = fm_h.where(fm_h.eq(hc.subtract(d0).divide(z0m).log()), fm_h.add(1.0))\n return fm_h",
"def calc_h2_working_cap(isotmt_dict): # pylint: disable=too-many-locals\n\n out_dict = {}\n out_dict['is_porous'] = isotmt_dict['is_porous']\n\n if out_dict['is_porous']:\n press2index = {}\n temp2index = {}\n for press in 1, 5, 100:\n press2index[press] = isotmt_dict['isotherm'][0]['pressure'].index(press)\n for temp in 77, 198, 298:\n temp2index[temp] = isotmt_dict['temperature'].index(temp)\n\n case2pt = {'a': [[100, 198], [5, 298]], 'b': [[100, 77], [5, 77]], 'c': [[100, 77], [1, 77]]}\n\n unitconv = {\n 'wt%': # convert mol/kg to wt%\n get_molec_uc_to_mg_g(isotmt_dict) / isotmt_dict['conversion_factor_molec_uc_to_mol_kg'] / 10,\n 'g/L': # convert mol/kg to g/L\n get_molec_uc_to_mg_g(isotmt_dict) / isotmt_dict['conversion_factor_molec_uc_to_mol_kg'] *\n isotmt_dict['Density']\n }\n\n for case, presstemp in case2pt.items():\n for unit, conv in unitconv.items():\n load_average = isotmt_dict['isotherm'][temp2index[presstemp[0][1]]]['loading_absolute_average'][\n press2index[presstemp[0][0]]]\n disc_average = isotmt_dict['isotherm'][temp2index[presstemp[1][1]]]['loading_absolute_average'][\n press2index[presstemp[1][0]]]\n load_dev = isotmt_dict['isotherm'][temp2index[presstemp[0][1]]]['loading_absolute_dev'][press2index[\n presstemp[0][0]]]\n disc_dev = isotmt_dict['isotherm'][temp2index[presstemp[1][1]]]['loading_absolute_dev'][press2index[\n presstemp[1][0]]]\n out_dict.update({\n 'case-{}_{}_unit'.format(case, unit): unit,\n 'case-{}_{}_average'.format(case, unit): (load_average - disc_average) * conv,\n 'case-{}_{}_dev'.format(case, unit): sqrt(load_dev**2 + disc_dev**2) * conv\n })\n\n return Dict(dict=out_dict)",
"def early_warnings_sensitivity_analysis(series,\n indicators=['var','ac'],\n winsizerange = [0.10, 0.8],\n incrwinsize = 0.10,\n smooth = \"Gaussian\",\n bandwidthrange = [0.05, 1.],\n spanrange = [0.05, 1.1],\n incrbandwidth = 0.2,\n incrspanrange = 0.1):\n\n results_kendal_tau = []\n for winsize in np.arange(winsizerange[0],winsizerange[1]+0.01,incrwinsize):\n\n winsize = round(winsize,3)\n if smooth == \"Gaussian\":\n\n for bw in np.arange(bandwidthrange[0], bandwidthrange[1]+0.01, incrbandwidth):\n\n bw = round(bw, 3)\n ews_dic_veg = ewstools.core.ews_compute(series.dropna(),\n roll_window=winsize,\n smooth=smooth,\n lag_times=[1, 2],\n ews=indicators,\n band_width=bw)\n\n result = ews_dic_veg['Kendall tau']\n result['smooth'] = bw\n result['winsize'] = winsize\n\n results_kendal_tau.append(result)\n\n\n elif smooth ==\"Lowess\":\n\n for span in np.arange(spanrange[0], spanrange[1]+0.01, incrspanrange):\n\n span = round(span,2)\n ews_dic_veg = ewstools.core.ews_compute(series.dropna(),\n roll_window=winsize,\n smooth=smooth,\n lag_times=[1, 2],\n ews=indicators,\n span=span)\n\n result = ews_dic_veg['Kendall tau']\n result['smooth'] = bw\n result['winsize'] = winsize\n\n results_kendal_tau.append(result)\n\n else:\n\n ews_dic_veg = ewstools.core.ews_compute(series.dropna(),\n roll_window=winsize,\n smooth='None',\n lag_times=[1, 2],\n ews=indicators)\n\n result = ews_dic_veg['Kendall tau']\n result['smooth'] = 0\n result['winsize'] = winsize\n\n results_kendal_tau.append(result)\n\n sensitivity_df = pd.concat(results_kendal_tau)\n\n return sensitivity_df",
"def func_kc_318(n, series):\n if series == \"3D3\":\n try:\n return 2*np.pi/(wl_3D3[str(n)]*1e-9)\n except:\n return 0",
"def F_calctimescales(i, st, dm):\n mr = st.mn*dm.mxkg_v[i]/(st.mn+dm.mxkg_v[i]) # reduced mass, kg\n # containment time (orbits within Rs)\n t1 = c_yr2s( 2.7e-2 * m.pow(dm.mxkg_v[i]/st.mn,1.5) / (dm.sigx/1.e-55) ) # s \n # therm time\n tth1 = c_yr2s( 2.5e+5 * m.pow(dm.mxkg_v[i]/st.mn,2.) * m.pow(st.mn/mr,3.) / (dm.sigx/1.e-55) ) # s\n# tth2 = m.pow(dm.mxkg_v[i]/st.mn,2.) * pF / 6. / m.sqrt(2.) / st.Temp / st.nb / dm.sigx * m.pow(st.mn/mr,3.) \n tth2 = m.pow(dm.mxkg_v[i],2.)*st.mn*pF /4./m.sqrt(2.)/(st.nb*1.e+6)/dm.sigx_m/m.pow(st.mn,3.)/st.Eth\n tth = tth2\n print \"-- Time scales: t1=%.2e , tth=%.2e, tth1=%.2e, tth2=%.2e\" % (t1,tth,tth1,tth2)\n return t1, tth",
"def fwhmwhisker_multiext(filename,sigma,band,zenith):\n hdu=pf.open(filename)\n e1=[]\n e2=[]\n fwhmw=[]\n whiskerw=[]\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n for i in range(Nobj):\n print i\n img = hdui.data[i][4:].reshape(160,160)\n imgrbin = rebin(img,(40,40))\n res=wfwhm(imgrbin,sigma)\n e1.append(res[0])\n e2.append(res[1])\n whiskerw.append(res[2]*0.27)\n fwhmw.append(res[3]*0.27)\n e1 = np.array(e1)\n e2 = np.array(e2)\n fwhmw = np.array(fwhmw)\n whiskerw = np.array(whiskerw)\n e1mean = e1.mean()\n e1std = e1.std()\n e2mean = e2.mean()\n e2std = e2.std()\n whiskerwmean = whiskerw.mean()\n whiskerwstd = whiskerw.std()\n fwhmwmean = fwhmw.mean()\n fwhmwstd = fwhmw.std()\n r50mean = np.mean(fwhmw/2.)\n r50std = np.std(fwhmw/2.)\n pl.figure(figsize=(15,10))\n pl.subplot(2,3,1)\n pl.hist(e1,bins=20,normed=True)\n pl.xlabel('e1')\n pl.title('mean: '+str(round(e1mean,6))+' std: '+str(round(e1std,5)))\n pl.subplot(2,3,2)\n pl.hist(e2,bins=20,normed=True)\n pl.xlabel('e2')\n pl.title('mean: '+str(round(e2mean,6))+' std: '+str(round(e2std,5)))\n pl.subplot(2,3,3)\n pl.hist(whiskerw,bins=20,normed=True)\n pl.xlabel('whisker')\n pl.title('mean: '+str(round(whiskerwmean,5))+' std: '+str(round(whiskerwstd,5)))\n pl.subplot(2,3,4)\n pl.hist(fwhmw,bins=20,normed=True)\n pl.xlabel('fwhm')\n pl.title('mean: '+str(round(fwhmwmean,5))+' std: '+str(round(fwhmwstd,5)))\n pl.subplot(2,3,5)\n pl.hist(fwhmw/2.,bins=20,normed=True)\n pl.xlabel('r50')\n pl.title('mean: '+str(round(r50mean,5))+' std: '+str(round(r50std,5)))\n pl.figtext(0.7,0.4,'band: '+band)\n pl.figtext(0.7,0.37,'zenith angle: '+zenith +' deg')\n pl.figtext(0.3,0.95,'Perfect focus/alignment, 0.7 arcsec fwhm circular seeing',fontsize=18,color='red')\n pl.savefig(filename[0:-6]+'png')\n np.savetxt(filename[0:-6]+'txt',[e1mean,e1std,e2mean,e2std,whiskerwmean,whiskerwstd,fwhmwmean,fwhmwstd,r50mean,r50std],fmt='%10.5f')\n pl.close()\n return '---done !-----'",
"def func_d23_318(n, series):\n if series == \"3D3\":\n try: \n return np.sqrt((3*os_3D3[str(n)]*wl_3D3[str(n)]*1e-9*hbar*e**2)/(4*np.pi*m_e*c))\n except:\n return 0",
"def get_specific_heat() -> float:\n return 1006.0",
"def compute_gain( hv, cv, thot, tcold):\n\n nData = len(cv) \n epsilons = np.full( nData, EPSILON)\n\n # For full Temp calibration, a spectrum taken at high elevation, away from \n # The galactic plan is used.\n dv = hv - cv \n dv = np.maximum( dv, epsilons)\n hv = np.maximum( hv, epsilons)\n\n # use the comparison of hot and cold to get gain without trx contribution\n gain = (thot - tcold)/dv\n\n trx = gain*cv \n trx = trx - tcold # correct for cold load input in counts\n\n n6 = int(nData/6)\n n56 = 5*n6\n\n trxmedian = np.median( trx[n6:n56])\n gain = (thot + trxmedian)/hv\n \n return trxmedian, gain # channel by channel gain in K/counts"
] | [
"0.61604285",
"0.56509274",
"0.55025834",
"0.53923845",
"0.53762066",
"0.5370133",
"0.5294281",
"0.52752477",
"0.5265615",
"0.520303",
"0.5171824",
"0.51513815",
"0.5132893",
"0.5092595",
"0.5088409",
"0.5087371",
"0.50813",
"0.5065104",
"0.50638103",
"0.5060144",
"0.50426376",
"0.5042543",
"0.50123984",
"0.49967948",
"0.4993396",
"0.49680945",
"0.49657336",
"0.49585333",
"0.4946481",
"0.494424"
] | 0.5779782 | 1 |
Read taxonomy nodes.dmp file into pandas DataFrame | def read_nodes_dmp(fname):
df = pd.read_csv(fname, sep="|", header=None, index_col=False,
names=['tax_id',
'parent_tax_id',
'rank',
'embl_code',
'division_id',
'inherited_div_flag', # 1 or 0
'genetic_code_id',
'inherited_GC_flag', # 1 or 0
'mitochondrial_genetic_code_id',
'inherited_MGC_flag', # 1 or 0
'GenBank_hidden_flag',
'hidden_subtree_root_flag', # 1 or 0
'comments'])
return df.assign(rank = lambda x: x['rank'].str.strip(),
embl_code = lambda x: x['embl_code'].str.strip(),
comments = lambda x: x['comments'].str.strip()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))",
"def nodes_df_creation(self, path: str) -> pyspark.sql.dataframe.DataFrame:\n try:\n nodes_df = self.spark.read.parquet(path)\n except OSError:\n print('cannot open', path)\n nodes_df = nodes_df.select('id', 'tags').filter(size(col('tags')) > 0)\n nodes_df = nodes_df.select(col('id'), explode(col('tags')).name('exploded_tags'))\\\n .filter(col('exploded_tags.key') == 'amenity')\n nodes_df = nodes_df.select(\"id\", 'exploded_tags.value').withColumnRenamed('id', 'nodeId')\\\n .withColumnRenamed('value', 'amenity_type')\n return nodes_df",
"def read_data(self):\n data = pd.read_table(self.file_dir, sep=\"\\t\", header=None)\n data.columns = [\"FromNodeId\", \"ToNodeId\"]\n return data",
"def parse_data(path_to_file):\n\n line_dict, rel_dict = create_dicts(path_to_file)\n \n line_df = create_dataframe(line_dict, ['line'])\n rel_df = create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def parse_data(self, path_to_file):\n\n line_dict, rel_dict = self.create_dicts(path_to_file)\n \n line_df = self.create_dataframe(line_dict, ['line'])\n rel_df = self.create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def read_names_dmp(fname):\n df = pd.read_csv(fname, sep=\"|\", header=None, index_col=False,\n names=[\"tax_id\",\n \"name_txt\",\n \"unique_name\",\n \"name_class\"])\n return df.assign(name_txt = lambda x: x['name_txt'].str.strip(),\n unique_name = lambda x: x['unique_name'].str.strip(),\n name_class = lambda x: x['name_class'].str.strip())",
"def get_reactome_hierarchy_df() -> pd.DataFrame:\n return pd.read_csv(REACTOME_HIERARCHICAL_MAPPINGS_PATH, sep='\\t')",
"def load(file):\n return pq.read_table(file).to_pandas()",
"def gff3_to_dataframe( file ):\n result = _read_gff3_using_pandas( file )\n extract_attributes_to_columns( result, ['ID', 'Parent', 'Name' ] )\n return result",
"def get_hashtags_df(graph_path: str = '/data/graphs/train_graph.p') -> pd.DataFrame:\n with open(PATH + graph_path, 'rb') as f:\n G = pickle.load(f)\n\n hashtags = [{'hashtag': node, **G.nodes[node]}\n for node in G.nodes\n if G.nodes[node]['node_type'] == 'hashtag']\n hashtags = pd.DataFrame(hashtags)\n return hashtags",
"def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data",
"def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')",
"def load_patran_nod(nod_filename, node_ids):\n data_dict = read_patran(nod_filename, fdtype='float32', idtype='int32')\n nids = data_dict['nids']\n data = data_dict['data']\n data_headers = data_dict['headers']\n ndata = data.shape[0]\n if len(data.shape) == 1:\n shape = (ndata, 1)\n data = data.reshape(shape)\n\n if ndata != node_ids.shape[0]:\n inids = np.searchsorted(node_ids, nids)\n assert np.array_equal(nids, node_ids[inids]), 'the node ids are invalid'\n data2 = np.full(data.shape, np.nan, data.dtype)\n data2[inids, :] = data\n else:\n data2 = data\n\n A = {}\n fmt_dict = {}\n headers = data_headers['SEC']\n for i, header in enumerate(headers):\n A[header] = data2[:, i]\n fmt_dict[header] = '%f'\n return A, fmt_dict, headers",
"def abstract_dataframe(filename):\n pmid_ab_dict = medline_parser(filename)\n df = pd.DataFrame.from_dict(pmid_ab_dict, orient='index').reset_index()\n df.columns = ['PMID', 'Abstract']\n \"\"\"\n Parallelized tokenizer and gene pairs functions gene-network analysis.\n returns a dataframe with tokenized abstracts, gene_pairs and labels\n \"\"\"\n # df = parallel_tokenizer(df)\n # df = parallel_genepairs(df)\n \"\"\"create dictionary for networx_work\"\"\"\n df = topic_extraction(df, 'Abstract') # after topic extraction adds labels\n # df.to_csv('with_lda_labels.csv') # uncomment if you want to save the file\n # gene_dict = {entry[0]:entry[1:] for entry in df['gene_pairs'] if entry != None}\n # network_graph(gene_dict) # uncomment if you want to generate a networkx graph\n return df",
"def read_graph(graph_path):\n print(\"\\nTarget matrix creation started.\\n\")\n graph = nx.from_edgelist(pd.read_csv(graph_path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph",
"def convert_nfdump_to_dataframe(input_file):\r\n temporary_file_fd, temporary_file_name = tempfile.mkstemp()\r\n\r\n # Convert nflow to csv\r\n p = subprocess.Popen(\r\n [\"nfdump_modified/bin/nfdump -r \" + input_file + \" -o extended -o csv > \" + temporary_file_name],\r\n shell=True,\r\n stdout=subprocess.PIPE)\r\n p.communicate()\r\n p.wait()\r\n\r\n columns = ['start_time', # ts,\r\n 'end_time', # te,\r\n 'time duration', # td,\r\n 'src_ip', # sa,\r\n 'dst_ip', # da,\r\n 'src_port', # sp,\r\n 'dst_port', # dp,\r\n 'ip_proto', # pr,\r\n 'tcp_flag', # flg,\r\n 'forwarding', # fwd,\r\n 'src_tos', # stos,\r\n 'i_packets', # ipkt,\r\n 'i_bytes', # ibyt,\r\n 'o_packets', # opkt,\r\n 'o_bytes', # obyt,\r\n 'i_interface_num', # in,\r\n 'o_interface_num', # out,\r\n 'src_as', # sas,\r\n 'dst_as', # das,\r\n 'src_mask', # smk,\r\n 'dst_mask', # dmk,\r\n 'dst_tos', # dtos,\r\n 'direction', # dir,\r\n 'next_hop_ip', # nh,\r\n 'bgt_next_hop_ip', # enhb,\r\n 'src_vlan_label', # svln,\r\n 'dst_vlan_label', # dvln,\r\n 'i_src_mac', # ismc,\r\n 'o_dst_mac', # odmc,\r\n 'i_dst_mac', # idmc,\r\n 'o_src_mac', # osmc,\r\n 'mpls1',\r\n 'mpls2',\r\n 'mpls3',\r\n 'mpls4',\r\n 'mpls5',\r\n 'mpls6',\r\n 'mpls7',\r\n 'mpls8',\r\n 'mpls9',\r\n 'mpls10',\r\n 'cl',\r\n 'sl',\r\n 'al',\r\n 'ra',\r\n 'eng',\r\n 'exid',\r\n 'tr']\r\n\r\n # Reset file pointer to start of file\r\n\r\n df = pd.read_csv(temporary_file_name, low_memory=False)\r\n\r\n df.dropna(inplace=True, how='any')\r\n\r\n df['dp'] = df['dp'].astype('int32')\r\n df['ibyt'] = df['ibyt'].astype('int32')\r\n df['sp'] = df['sp'].astype('int32')\r\n\r\n df.columns = columns\r\n\r\n try:\r\n os.remove(temporary_file_name)\r\n except IOError:\r\n pass\r\n\r\n return df",
"def read_lexicon_into_df(lex_txt_file):\n data = []\n with open(lex_txt_file) as txtf:\n lines = txtf.readlines()\n for line in lines:\n root = re.search(r\"root='(.*?)'\", line).group(1)\n if root.startswith('0'):\n num_radicals = 3\n else:\n num_radicals = 4\n verb_class = re.search(r\"class='(.*?)'\", line).group(1)\n verb_type = re.search(r\"type='(.*?)'\", line).group(1)\n infinitive = re.search(r\"inf='(.*?)'\", line).group(1)\n languages = re.search(r\"lang='(.*?)'\", line).group(1)\n gloss = re.search(r\"gloss='(.*?)'\", line).group(1)\n\n data.append([root, num_radicals, verb_class, verb_type, infinitive, languages, gloss])\n\n lexicon_df = pd.DataFrame(data, columns=['root', 'num_radicals', 'class', 'type', 'infinitive', 'languages', 'gloss'])\n\n lexicon_df['root'] = lexicon_df['root'].str.replace(\"0\", \"\")\n lexicon_df = utify_chars(lexicon_df)\n lexicon_df.to_csv('babylex.csv')\n return lexicon_df",
"def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph",
"def read_bed_file(path, labelnum=0):\n\n bed_df = pd.read_table(path, sep=\"\\t\", header=None)\n colnames = generate_colnames(bed_df, labelnum)\n bed_df.columns = colnames\n print(bed_df.head())\n return bed_df",
"def read_to_df(path):\n return pd.DataFrame.from_records(map(lambda x: typed_line(x, parse_normalized), read_lines(path, header=False)),\n columns=['user', 'item', 'rating'])",
"def parse_graph(node_distances_fp):\n\n dist_df = pd.read_csv(node_distances_fp, sep='\\t', index_col=0)\n\n # check all nodes are represented as rows and columns\n if all(dist_df.columns != dist_df.index):\n raise ValueError(f\"All vs all TSV must be square: {node_distances_fp}\"\n \" columns and row names do not match\")\n\n # check if all distances are floats\n if all(dist_df.dtypes != 'float64'):\n raise ValueError(f\"Non-float values in TSV: {node_distances_fp} \"\n \"please fix and choose an appropriate value for \"\n \"NaNs\")\n\n # check if distances are symmetric and deal with float epsilon\n if not np.all(np.abs(dist_df.values - dist_df.values.T) < 1e-8):\n raise ValueError(f\"Distances are not symmetrical: {node_distances_fp}\"\n \" please fix or modify code to create directed \"\n \"graph\")\n\n # get graph\n graph = nx.Graph(dist_df)\n\n return dist_df, graph",
"def load_nodes(filename):\n\n with open(filename) as f:\n reader = csv.DictReader(f)\n return [item for item in reader]",
"def transform_from_edgefile(filename, seed=None, dim=2):\n g = Graph.Read_Ncol(filename)\n\n layout = g.layout_drl(seed=seed,\n dim=dim,\n )\n\n xy = pd.DataFrame(vars(layout)[\"_coords\"], index=g.vs[\"name\"])\n\n return xy",
"def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)",
"def read_data(fname, cols):\n df = (pd.read_csv(fname, header=None, sep=r\"\\s+\", comment=\"#\",\n names=cols, dtype=np.float64)\n .iloc[1:]) # First line is the total number of trees\n # Could reset_index, but we don't shuffle the DataFrame\n return df",
"def load_labels(label_file) :\n df = pd.read_csv(label_file, index_col=\"p_index\",\n dtype=str, na_values=['nan', 'NaN', '']).dropna()\n\n return df",
"def treeToPanda(tree, variables, selection, nEntries, firstEntry, columnMask='default'):\n entries = tree.Draw(str(variables), selection, \"goffpara\", nEntries, firstEntry) # query data\n columns = variables.split(\":\")\n # replace column names\n # 1.) pandas does not allow dots in names\n # 2.) user can specified own mask\n for i, column in enumerate(columns):\n if columnMask == 'default':\n column = column.replace(\".fElements\", \"\").replace(\".fX$\", \"X\").replace(\".fY$\", \"Y\")\n else:\n masks = columnMask.split(\":\")\n for mask in masks:\n column = column.replace(mask, \"\")\n columns[i] = column.replace(\".\", \"_\")\n # print(i, column)\n # print(columns)\n ex_dict = {}\n for i, a in enumerate(columns):\n # print(i,a)\n val = tree.GetVal(i)\n ex_dict[a] = np.frombuffer(val, dtype=float, count=entries)\n df = pd.DataFrame(ex_dict, columns=columns)\n return df",
"def _read_edge_file(self):\n self.edge_df = gt.remove_colons(pd.read_csv(self.edge_file, dtype=str))"
] | [
"0.6789381",
"0.6212274",
"0.615097",
"0.6114801",
"0.6108535",
"0.5889607",
"0.5821102",
"0.58102983",
"0.57548654",
"0.56308955",
"0.5610583",
"0.56097096",
"0.5609186",
"0.55935985",
"0.5593163",
"0.5582398",
"0.55768555",
"0.55726385",
"0.55648285",
"0.55583286",
"0.55448854",
"0.5542895",
"0.55419874",
"0.5501428",
"0.54927814",
"0.5477818",
"0.54551244",
"0.54339653",
"0.5428845",
"0.5422457"
] | 0.78661364 | 0 |
Read taxonomy names.dmp file into pandas DataFrame | def read_names_dmp(fname):
df = pd.read_csv(fname, sep="|", header=None, index_col=False,
names=["tax_id",
"name_txt",
"unique_name",
"name_class"])
return df.assign(name_txt = lambda x: x['name_txt'].str.strip(),
unique_name = lambda x: x['unique_name'].str.strip(),
name_class = lambda x: x['name_class'].str.strip()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_nodes_dmp(fname):\n df = pd.read_csv(fname, sep=\"|\", header=None, index_col=False,\n names=['tax_id', \n 'parent_tax_id',\n 'rank', \n 'embl_code',\n 'division_id', \n 'inherited_div_flag', # 1 or 0\n 'genetic_code_id', \n 'inherited_GC_flag', # 1 or 0\n 'mitochondrial_genetic_code_id', \n 'inherited_MGC_flag', # 1 or 0\n 'GenBank_hidden_flag',\n 'hidden_subtree_root_flag', # 1 or 0 \n 'comments'])\n return df.assign(rank = lambda x: x['rank'].str.strip(),\n embl_code = lambda x: x['embl_code'].str.strip(),\n comments = lambda x: x['comments'].str.strip())",
"def load(file):\n return pq.read_table(file).to_pandas()",
"def read_sectionKLD_df(txtpath, dfname=False):\n df = pd.read_csv(txtpath, header=0)\n if not dfname:\n df.name = re.search(r'(\\w+)_kld.txt',basename(txtpath)).group(1)\n else:\n df.name = dfname\n return df",
"def txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data",
"def read_bed_file(path, labelnum=0):\n\n bed_df = pd.read_table(path, sep=\"\\t\", header=None)\n colnames = generate_colnames(bed_df, labelnum)\n bed_df.columns = colnames\n print(bed_df.head())\n return bed_df",
"def get_ctffind_4_1_0_meta(file_name: str) -> pd.DataFrame:\n extract_dict: typing.Dict[str, str]\n ctffind_meta_data: pd.DataFrame\n lines: typing.List[str]\n match: typing.Optional[typing.Match[str]]\n non_string_values: typing.Set[str]\n\n extract_dict = get_ctffind_4_1_0_extract_dict()\n ctffind_meta_data = pd.DataFrame(index=[0], columns=extract_dict.keys())\n with open(file_name, 'r') as read:\n lines = read.readlines()\n\n non_string_values = set([\n 'MicrographNameNoDW',\n 'version'\n ])\n for line in lines:\n for key, value in extract_dict.items():\n match = re.match(value, line)\n if match is not None:\n try:\n ctffind_meta_data[key] = float(match.group(1))\n except ValueError:\n assert key in non_string_values, f'{key}: {match.group(1)}'\n ctffind_meta_data[key] = match.group(1)\n else:\n pass\n return ctffind_meta_data",
"def parse_data(path_to_file):\n\n line_dict, rel_dict = create_dicts(path_to_file)\n \n line_df = create_dataframe(line_dict, ['line'])\n rel_df = create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def load_labels(label_file) :\n df = pd.read_csv(label_file, index_col=\"p_index\",\n dtype=str, na_values=['nan', 'NaN', '']).dropna()\n\n return df",
"def parse_data(self, path_to_file):\n\n line_dict, rel_dict = self.create_dicts(path_to_file)\n \n line_df = self.create_dataframe(line_dict, ['line'])\n rel_df = self.create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def hoomdlog(filename):\r\n\r\n data = pd.read_csv(filename, sep = '\\s+')\r\n return data",
"def load_df_from_txt(fname, direc=\"data/result/\", sep=\"\\t\"):\n path = create_file_path(fname, direc)\n try:\n return multi_index_tsv_to_dataframe(path, sep, header_rows=None)\n except IOError:\n raise IOError(\"Failed to open '{}\".format(path))",
"def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))",
"def __prepareDataSet(fileName):\n\n labels = []\n utterances = []\n\n with open(fileName) as f:\n lines = f.readlines()\n\n for line in lines:\n try:\n act = line[:line.index(\" \")]\n utterance = line[line.index(\" \"):line.index(\"\\n\")]\n\n try:\n labels.append(act.strip())\n utterances.append(utterance.strip())\n\n except KeyError:\n pass\n\n except ValueError:\n pass\n\n return labels, utterances",
"def get_reactome_hierarchy_df() -> pd.DataFrame:\n return pd.read_csv(REACTOME_HIERARCHICAL_MAPPINGS_PATH, sep='\\t')",
"def read_lexicon_into_df(lex_txt_file):\n data = []\n with open(lex_txt_file) as txtf:\n lines = txtf.readlines()\n for line in lines:\n root = re.search(r\"root='(.*?)'\", line).group(1)\n if root.startswith('0'):\n num_radicals = 3\n else:\n num_radicals = 4\n verb_class = re.search(r\"class='(.*?)'\", line).group(1)\n verb_type = re.search(r\"type='(.*?)'\", line).group(1)\n infinitive = re.search(r\"inf='(.*?)'\", line).group(1)\n languages = re.search(r\"lang='(.*?)'\", line).group(1)\n gloss = re.search(r\"gloss='(.*?)'\", line).group(1)\n\n data.append([root, num_radicals, verb_class, verb_type, infinitive, languages, gloss])\n\n lexicon_df = pd.DataFrame(data, columns=['root', 'num_radicals', 'class', 'type', 'infinitive', 'languages', 'gloss'])\n\n lexicon_df['root'] = lexicon_df['root'].str.replace(\"0\", \"\")\n lexicon_df = utify_chars(lexicon_df)\n lexicon_df.to_csv('babylex.csv')\n return lexicon_df",
"def uadb_ascii_to_dataframe(filename, **kwargs):\n import datetime\n import zipfile\n import gzip\n import os\n import io\n import numpy as np\n import pandas as pd\n from . import support as sp\n\n if not os.path.isfile(filename):\n raise IOError(\"File not Found! %s\" % filename)\n\n if '.zip' in filename:\n archive = zipfile.ZipFile(filename, 'r')\n inside = archive.namelist()\n tmp = archive.open(inside[0])\n tmp = io.TextIOWrapper(tmp, encoding='utf-8')\n tmp = tmp.read()\n archive.close()\n data = tmp.splitlines() # Memory (faster)\n elif '.gz' in filename:\n\n with gzip.open(filename, 'rt', encoding='utf-8') as infile:\n tmp = infile.read() # alternative readlines (slower)\n data = tmp.splitlines() # Memory (faster)\n else:\n with open(filename, 'rt') as infile:\n tmp = infile.read() # alternative readlines (slower)\n data = tmp.splitlines() # Memory (faster)\n\n raw = []\n headers = []\n dates = []\n nmiss = 0\n iprev = 0\n search_h = False\n i = 0\n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n ident = line[15:21] # WMO\n idflag = int(line[22:24]) # id flag\n d_src = int(line[25:28]) # source dataset\n version = float(line[29:34]) # version\n dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%2d\" % int(line[46:48])\n hour = line[49:53]\n locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n ele = float(line[79:85])\n stype = int(line[86:88])\n numlev = int(line[89:93])\n pvers = line[94:102]\n\n # wired stuff !?\n if '99' in hour:\n hour = hour.replace('99', '00')\n\n if '99' in day:\n search_h = True\n continue\n\n minutes = int(hour) % 100\n hour = \"%02d\" % (int(hour) // 100)\n\n if minutes > 60 or minutes < 0:\n minutes = 0\n\n elif minutes == 60:\n minutes = 59\n\n else:\n pass\n minutes = \"%02d\" % minutes\n idate = datetime.datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n headers.append((idate, usi, numlev, lat, lon, ele, stype))\n pday = int(day)\n search_h = False\n\n except Exception as e:\n print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n if kwargs.get('debug', False):\n raise e\n\n search_h = True\n iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n ltyp = int(line[0:4])\n press = float(line[5:13]) # hPa\n gph = float(line[14:22])\n temp = float(line[23:29]) # degree\n rh = float(line[30:36]) # %\n wdir = float(line[37:43])\n wspd = float(line[44:50]) # m/s\n raw.append((press, gph, temp, rh, wdir, wspd))\n dates.append(idate)\n\n sp.message(\"UADB Lines read:\", i, \"skipped:\", nmiss, \"Header:\", len(headers), **kwargs)\n\n out = pd.DataFrame(data=raw, index=dates, columns=['pres', 'gph', 'temp', 'rhumi', 'windd', 'winds'])\n out = out.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9], np.nan)\n # fix units\n out['pres'] *= 100. # need Pa\n out.index.name = 'date'\n headers = pd.DataFrame(data=headers, columns=['date', 'uid', 'numlev', 'lat', 'lon', 'alt', 'stype']).set_index(\n 'date')\n return out, headers",
"def read_dmp(file, usecols=None, dtype=None, drop_no_st_num=True,\n abbr_addr=True, filter_multiple=False, nrows=None, **kwargs):\n # Define default columns to read from the CSV file\n if usecols is None:\n usecols = ['APN',\n 'SITE_ADDR', 'SITE_CITY', 'SITE_ZIP', 'COUNTY',\n 'LONGITUDE', 'LATITUDE', 'SITE_HOUSE_NUMBER',\n 'USE_CODE_STD_CTGR_DESC', 'USE_CODE_STD_DESC',\n 'YR_BLT', 'DATE_TRANSFER',\n 'BUILDING_SQFT', 'LAND_SQFT']\n # Define the default data type of each column\n if dtype is None:\n dtype = {'APN': str,\n 'SITE_ADDR': str,\n 'SITE_CITY': str,\n 'SITE_ZIP': str,\n 'COUNTY': str,\n 'LONGITUDE': np.float64,\n 'LATITUDE': np.float64,\n 'SITE_HOUSE_NUMBER': str,\n 'USE_CODE_STD_CTGR_DESC': str,\n 'USE_CODE_STD_DESC': str,\n 'YR_BLT': np.float64,\n 'DATE_TRANSFER': str,\n 'BUILDING_SQFT': np.float64,\n 'LAND_SQFT': np.float64}\n # Miscell options\n encoding = 'iso-8859-1'\n engine = 'c'\n\n # Read file\n data = pd.read_csv(file,\n usecols=usecols, dtype=dtype,\n encoding=encoding, engine=engine,\n nrows=nrows, **kwargs)\n # Drop duplicates\n data = data.drop_duplicates()\n\n # Standardize columns spelling for easier merging\n data = data.rename(columns={'APN': 'PropertyID',\n 'SITE_ADDR': 'address',\n 'SITE_CITY': 'city',\n 'SITE_ZIP': 'zip',\n 'COUNTY': 'county',\n 'LONGITUDE': 'Longitude',\n 'LATITUDE': 'Latitude',\n 'YR_BLT': 'year_built',\n 'DATE_TRANSFER': 'date_transfer',\n 'BUILDING_SQFT': 'building_area',\n 'LAND_SQFT': 'land_area'})\n\n # Drop entries that have empty address/city/zip\n for col in ['address', 'city', 'county']:\n if col in data:\n data = data.dropna(subset=[col], axis=0)\n # Standardize the entries of address, city and county to upper case\n for col in ['address', 'city', 'county']:\n if col in data:\n data[col] = data[col].str.upper()\n # Standardize address\n if ('address' in data) and abbr_addr:\n for key in addr_dict:\n data['address'] = data['address'].str.replace(key, addr_dict[key])\n # Extract only the 5-digit zip codes\n if 'zip' in data:\n data['zip'] = data['zip'].str[:5]\n # Typecast dates\n if 'date_transfer' in data:\n data['date_transfer'] = data['date_transfer'].str.split(' ').str[0]\n data['date_transfer'] = pd.to_datetime(data['date_transfer'],\n format='%m/%d/%Y')\n\n # Fix spaces at the end of building types\n for col in ['USE_CODE_STD_CTGR_DESC', 'USE_CODE_STD_DESC']:\n if col in data:\n data[col] = data[col].str.rstrip()\n\n # Get rid of entries that have no street number\n if drop_no_st_num:\n data = data[data['SITE_HOUSE_NUMBER'].notnull()]\n data = data.drop('SITE_HOUSE_NUMBER', axis=1)\n\n # Filter buildings that belong to the same address if selected\n if filter_multiple:\n group_keys = ['address', 'city', 'zip']\n num_bldg = data.groupby(group_keys).size()\n index_pf = num_bldg[num_bldg == 1].index\n data = data.set_index(group_keys).loc[index_pf].reset_index()\n\n return data.reset_index(drop=True)",
"def load_swc(file_name):\n df = pd.read_csv(file_name, sep = ' ', header=None, comment='#', index_col = False,\n names=['sample', 'identifier', 'x', 'y', 'z', 'r', 'parent'],\n skipinitialspace=True)\n return df",
"def load_utlization(path):\n df = pd.read_csv(f\"{raw_data}\\\\{path}\", parse_dates=[\"AdmissionDate\"])\n\n df.rename(\n columns={\"MemberID\": \"member_id\", \"LOSDays\": \"los\", \"FacilityName\": \"facility\"},\n inplace=True,\n )\n\n df.columns = clean_table_columns(df.columns)\n\n facility_col = [col for col in df.columns if \"facility\" in col][0]\n\n df = cognify_facility_changes(df, facility_col)\n\n df = df[df.member_id != 1003]\n return df",
"def read(self):\r\n df = super(TSPReader, self).read()\r\n df.columns = map(lambda x: x.strip(), df.columns)\r\n df.drop(columns=self.all_symbols - set(self.symbols), inplace=True)\r\n return df",
"def abstract_dataframe(filename):\n pmid_ab_dict = medline_parser(filename)\n df = pd.DataFrame.from_dict(pmid_ab_dict, orient='index').reset_index()\n df.columns = ['PMID', 'Abstract']\n \"\"\"\n Parallelized tokenizer and gene pairs functions gene-network analysis.\n returns a dataframe with tokenized abstracts, gene_pairs and labels\n \"\"\"\n # df = parallel_tokenizer(df)\n # df = parallel_genepairs(df)\n \"\"\"create dictionary for networx_work\"\"\"\n df = topic_extraction(df, 'Abstract') # after topic extraction adds labels\n # df.to_csv('with_lda_labels.csv') # uncomment if you want to save the file\n # gene_dict = {entry[0]:entry[1:] for entry in df['gene_pairs'] if entry != None}\n # network_graph(gene_dict) # uncomment if you want to generate a networkx graph\n return df",
"def convert_abundances_format(fname, delimiter=r'\\s+'):\n df = pd.read_csv(fname, delimiter=delimiter, comment='#', header=None)\n # Drop shell index column\n df.drop(df.columns[0], axis=1, inplace=True)\n # Assign header row\n df.columns = [nucname.name(i)\n for i in range(1, df.shape[1] + 1)]\n return df",
"def load_pdbbind_labels(labels_file):\n # Some complexes have labels but no PDB files. Filter these manually\n missing_pdbs = [\"1d2v\", \"1jou\", \"1s8j\", \"1cam\", \"4mlt\", \"4o7d\"]\n contents = []\n with open(labels_file) as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n else:\n # Some of the ligand-names are of form (FMN ox). Use regex\n # to merge into form (FMN-ox)\n p = re.compile('\\(([^\\)\\s]*) ([^\\)\\s]*)\\)')\n line = p.sub('(\\\\1-\\\\2)', line)\n elts = line.split()\n # Filter if missing PDB files\n if elts[0] in missing_pdbs:\n continue\n contents.append(elts)\n contents_df = pd.DataFrame(\n contents,\n columns=(\"PDB code\", \"resolution\", \"release year\", \"-logKd/Ki\", \"Kd/Ki\",\n \"ignore-this-field\", \"reference\", \"ligand name\"))\n return contents_df",
"def load_swc(file_name):\n\n df = pd.read_csv(file_name, delimiter=' ', header=None, comment='#',\n names=['sample', 'identifier', 'x', 'y', 'z', 'r', 'parent'],\n skipinitialspace=True).astype({'sample':int,'identifier':int,'x':float,'y':float,'z':float,'r':float,'parent':int})\n return df",
"def load_ctffind_4_1_0(file_name: str) -> pd.DataFrame:\n header_names: typing.List[str]\n ctffind_data: pd.DataFrame\n ctffind_meta: pd.DataFrame\n\n header_names = get_ctffind_4_1_0_header_names()\n ctffind_data = util.load_file(\n file_name,\n names=header_names,\n skiprows=5,\n usecols=(1, 2, 3, 4, 5, 6)\n )\n ctffind_data['PhaseShift'] = np.degrees(ctffind_data['PhaseShift'])\n\n ctffind_meta = get_ctffind_4_1_0_meta(file_name=file_name)\n return pd.concat([ctffind_data, ctffind_meta], axis=1)",
"def readDMV(filename):\n import numpy as np\n import pandas as pd\n import xarray as xr\n from collections import OrderedDict\n from ohwhio import getDMVformat\n\n def readTOC(sizeTOC):\n dependentVariables = OrderedDict({})\n dependentVariableRecords = OrderedDict({})\n if (sizeTOC == 40): # RNC, RFC, RLC, ...\n # dependent data information for single-variable file.\n sizeDependentRecord = np.fromfile(f, np.int32, 1)[0]\n formatDependentRecord = np.fromfile(f, np.int32, 1)[0]\n scalingFactorLog = np.fromfile(f, np.int32, 1)[0]\n dependentPrecisionLog = np.fromfile(f, np.int32, 1)[0]\n # independent data information\n independentMinimum = np.fromfile(f, np.float64, 1)[0]\n independentMaximum = np.fromfile(f, np.float64, 1)[0]\n independentPrecisionLog = np.fromfile(f, np.int32, 1)[0]\n # number of attributes for next section.\n numberOfDependentAttributes = np.fromfile(f, np.int32, 1)[0]\n numberOfDependentVariables = 1\n # Now read the attributes for the single variable.\n # Variable name\n nbytes = np.fromfile(f, np.int32, 1)[0]\n variableName = f.read(nbytes).decode('utf-8')\n # Short name\n nbytes = np.fromfile(f, np.int32, 1)[0]\n shortname = f.read(nbytes).decode('utf-8')\n # Short name\n nbytes = np.fromfile(f, np.int32, 1)[0]\n longname = f.read(nbytes).decode('utf-8')\n # Units\n nbytes = np.fromfile(f, np.int32, 1)[0]\n units = f.read(nbytes).decode('utf-8')\n # Precision\n precision = \"{:.0E}\".format(10 ** dependentPrecisionLog)\n # Now add this to the data variable dictionary.\n dependentVariables.update({variableName: OrderedDict([('longname', longname),\n ('units', units),\n ('precision', precision)])})\n dependentVariableRecords.update({variableName: OrderedDict([('sizeDependentRecord', sizeDependentRecord),\n ('formatDependentRecord', formatDependentRecord),\n ('scalingFactorLog', scalingFactorLog),\n ('dependentPrecisionLog', dependentPrecisionLog),\n ('identifier', identifier),\n ('independentMinimum', independentMinimum),\n ('independentMaximum', independentMaximum),\n ('numberOfDependentAttributes', numberOfDependentAttributes),\n ('numberOfDependentVariables', numberOfDependentVariables)])})\n elif (sizeTOC == 48): # CXS, CSV, CVS, UVS, SUM, ...\n Continuation = -1 # Non-zero to start loop.\n while (Continuation):\n # dependent data information\n sizeDependentRecord = np.fromfile(f, np.int32, 1)[0]\n formatDependentRecord = np.fromfile(f, np.int32, 1)[0]\n scalingFactorLog = np.fromfile(f, np.int32, 1)[0]\n dependentPrecisionLog = np.fromfile(f, np.int32, 1)[0]\n # independent data information\n independentMinimum = np.fromfile(f, np.float64, 1)[0]\n independentMaximum = np.fromfile(f, np.float64, 1)[0]\n independentPrecisionLog = np.fromfile(f, np.int32, 1)[0]\n # additional data to support multiple variables\n identifier = np.fromfile(f, np.int32, 1)[0]\n Continuation = np.fromfile(f, np.int32, 1)[0]\n # number of attributes for next section.\n numberOfDependentAttributes = np.fromfile(f, np.int32, 1)[0]\n numberOfDependentVariables = identifier + Continuation\n # Now read the attributes for the single variable.\n # Variable name\n nbytes = np.fromfile(f, np.int32, 1)[0]\n variableName = f.read(nbytes).decode('utf-8')\n # Short name\n nbytes = np.fromfile(f, np.int32, 1)[0]\n shortname = f.read(nbytes).decode('utf-8')\n # Short name\n nbytes = np.fromfile(f, np.int32, 1)[0]\n longname = f.read(nbytes).decode('utf-8')\n # Units\n nbytes = np.fromfile(f, np.int32, 1)[0]\n units = f.read(nbytes).decode('utf-8')\n # Precision\n precision = \"{:.0E}\".format(10 ** 
dependentPrecisionLog)\n # Now add this to the data variable dictionary.\n dependentVariables.update({variableName: OrderedDict([('longname', longname),\n ('units', units),\n ('precision', precision)])})\n dependentVariableRecords.update({variableName: OrderedDict([('sizeDependentRecord', sizeDependentRecord),\n ('formatDependentRecord', formatDependentRecord),\n ('scalingFactorLog', scalingFactorLog),\n ('dependentPrecisionLog', dependentPrecisionLog),\n ('identifier', identifier),\n ('independentMinimum', independentMinimum),\n ('independentMaximum', independentMaximum),\n ('numberOfDependentAttributes', numberOfDependentAttributes),\n (\n 'numberOfDependentVariables', numberOfDependentVariables)])})\n else:\n print('Erroneous size of Table of Contents!! Something is strange with your DMV file!!')\n return (sizeTOC)\n\n return dependentVariables, dependentVariableRecords\n\n def DMVfileStructure(filename):\n '''Determines the structure for DMV files.\n\n Input:\n filename - DMV file name\n\n Output:\n recordSize - size of data records in bytes for each measurement in time\n variableOffset - offset (in floats) to where the variables start\n dataOffset - offset (in float values) to where data starts\n\n Notes:\n Determine number of data records for each time step.\n factor of 5 is the number of measurements: BB1-BB2-scene-BB2-BB1\n numberOfDependentVariableBytes is the cumulative number of bytes for all dependent variables\n factor of 4 is the number of bytes in each number.\n '''\n ext = filename.split('.')[-1]\n\n # Determine the cumulative number of bytes in the dependent variables.\n numberOfDependentVariableBytes = np.array([dependentVariableRecords[v]['sizeDependentRecord'] for v in dependentVariableRecords]).sum()\n\n # Determine the record size, variable offset and data offset based on file type.\n # ....RNC ######################################################################################################\n if ((ext == 'RNC') | (ext == 'rnc')):\n channel = filename.split('.')[0][-1]\n if channel == '1':\n nvars = 79\n else:\n nvars = 71\n nvarsExtra1 = 14\n nvarsExtra2 = 22\n\n recordSize = ((nvars * 5) + nvarsExtra1 + (nvars * 5) + nvarsExtra2) * 4 + numberOfDependentVariableBytes\n variableOffset = (nvars * 4) + (nvars + nvarsExtra1) + (nvars * 4)\n dataOffset = [(nvars * 4) + (nvars + nvarsExtra1) + (nvars * 4) + (nvars + nvarsExtra2)]\n # ....RFC and RLC ######################################################################################################\n elif ((ext == 'RLC') | (ext == 'rlc') | (ext == 'RFC') | (ext == 'rfc')):\n channel = filename.split('.')[0][-1]\n typ = filename.split('.')[0][-2:-1]\n if (typ == 'B'):\n scanDirection = 'Backward'\n elif(typ == 'F'):\n scanDirection = 'Forward'\n else:\n scanDirection = 'Both' # C1 or C2\n\n if ((scanDirection=='Backward') | (scanDirection=='Forward')): # Backward and Forward\n if channel == '1':\n nvars = 79\n else:\n nvars = 71\n nvarsExtra = 14\n\n recordSize = (nvars * 4)*4 + (nvars + nvarsExtra)*4 + numberOfDependentVariableBytes\n variableOffset = nvars * 4\n dataOffset = [(nvars * 5) + nvarsExtra]\n else: # Both (C1 or C2)\n if channel == '1':\n nvars = 79\n else:\n nvars = 71\n nvarsExtra1 = 14\n nvarsExtra2 = 15\n\n recordSize = ((nvars * 4) + (nvars + nvarsExtra1) + (nvars * 4) + (nvars + nvarsExtra2)) * 4 + numberOfDependentVariableBytes\n variableOffset = (nvars * 4) + (nvars + nvarsExtra1) + (nvars * 4)\n dataOffset = [(nvars * 4) + (nvars + nvarsExtra1) + (nvars * 4) + (nvars + nvarsExtra2)]\n # ....CXS 
######################################################################################################\n elif ((ext == 'CXS') | (ext == 'cxs')):\n nvars = 71\n nvarsExtra1 = 0\n nvarsExtra2 = 0\n channel = filename.split('.')[0][-1]\n typ = filename.split('.')[0][-2:-1]\n if (typ == 'B'):\n scanDirection = 'Backward'\n else:\n scanDirection = 'Forward'\n\n # Special case for Channel 1, Forward direction, which contains 104 extra variables of 28 bytes each.\n if ((channel == '1') & (scanDirection == 'Forward')):\n extraBytes = np.array([dependentVariableRecords[v]['sizeDependentRecord'] for v in dependentVariableRecords])[2:].sum()\n # Now drop all of the extra dependent variables except the real and imag spectra.\n vs = [variable for variable in dependentVariables]\n for v in vs[2:]:\n dependentVariables.pop(v);\n dependentVariableRecords.pop(v);\n numberOfDependentVariableBytes = numberOfDependentVariableBytes - extraBytes\n else:\n extraBytes = 0\n # print(numberOfDependentVariableBytes, extraBytes)\n recordSize = (nvars * 4) + numberOfDependentVariableBytes + extraBytes\n variableOffset = 0\n dataOffset = [nvars]\n for v in dependentVariableRecords:\n dataOffset.append(dataOffset[-1] + int(dependentVariableRecords[v]['sizeDependentRecord']/4))\n dataOffset.pop();\n # ....CXV ######################################################################################################\n elif ((ext == 'CXV') | (ext == 'cxv')):\n nvars = 79\n nvarsExtra1 = 0\n nvarsExtra2 = 0\n channel = filename.split('.')[0][-1]\n typ = filename.split('.')[0][-2:-1]\n if (typ == 'B'):\n scanDirection = 'Backward'\n else:\n scanDirection = 'Forward'\n\n # Special case for Channel 1, Forward direction, which contains 104 extra variables of 28 bytes each.\n if ((channel == '1') & (scanDirection == 'Forward')):\n extraBytes = np.array([dependentVariableRecords[v]['sizeDependentRecord'] for v in dependentVariableRecords])[2:].sum()\n # Now drop all of the extra dependent variables except the real and imag spectra.\n vs = [variable for variable in dependentVariables]\n for v in vs[2:]:\n dependentVariables.pop(v);\n dependentVariableRecords.pop(v);\n numberOfDependentVariableBytes = numberOfDependentVariableBytes - extraBytes\n else:\n extraBytes = 0\n # print(numberOfDependentVariableBytes, extraBytes)\n recordSize = (nvars * 4) + numberOfDependentVariableBytes + extraBytes\n variableOffset = 0\n dataOffset = [nvars]\n for v in dependentVariableRecords:\n dataOffset.append(dataOffset[-1] + int(dependentVariableRecords[v]['sizeDependentRecord']/4))\n dataOffset.pop();\n # ....SUM ######################################################################################################\n elif ((ext == 'SUM') | (ext == 'sum')):\n # Handles a special case where the format of the SUM files changed\n # probably because AERI.xml was changed during ICECAPS.\n yy = filename.split('.')[-2][-6:-4]\n if int(yy)>96:\n yymmdd = '19' + filename.split('.')[-2][-6:]\n else:\n yymmdd = '20' + filename.split('.')[-2][-6:]\n if pd.to_datetime(yymmdd) < pd.to_datetime('20110707'):\n recordSize = 9776\n else:\n recordSize = 9744\n nvars = 144\n variableOffset = 1479\n dataOffset = [variableOffset + nvars]\n for v in dependentVariableRecords:\n dataOffset.append(dataOffset[-1] + int(dependentVariableRecords[v]['sizeDependentRecord']/4))\n dataOffset.pop();\n else:\n print('ERROR: Incorrect file type. Try again...')\n return {}\n\n numberOfRecords = int((eof - headerSize + 1) / recordSize)\n numberOfValues = int(recordSize / 4)\n\n return {'recordSize': recordSize,\n 'variableOffset': variableOffset,\n 'dataOffset': dataOffset,\n 'numberOfRecords': numberOfRecords,\n 'numberOfValues': numberOfValues,\n 'numberOfVariables': nvars\n }\n\n def determineWavenumberScales(filename):\n ext = filename.split('.')[-1]\n vs = [variable for variable in dependentVariableRecords]\n\n if ((ext == 'RNC') | (ext == 'rnc') | (ext == 'RFC') | (ext == 'rfc') | (ext == 'RLC') | (ext == 'rlc') | (ext == 'CXS') | (ext == 'cxs') | (ext == 'CXV') | (ext == 'cxv')):\n v = vs[0]\n bwn = dependentVariableRecords[v]['independentMinimum']\n ewn = dependentVariableRecords[v]['independentMaximum']\n nwn = int(dependentVariableRecords[v]['sizeDependentRecord'] / 4)\n wnum1 = np.linspace(bwn, ewn, nwn, dtype=np.float64)\n\n # Add the wavenumber scale as a variable to the xarray dataset.\n ds[wavenumberScales[v]] = wnum1.astype(np.float64)\n ds[wavenumberScales[v]].attrs['longname'] = 'Wavenumber in reciprocal centimeters'\n ds[wavenumberScales[v]].attrs['units'] = 'centimeter^-1'\n ds[wavenumberScales[v]].attrs['precision'] = '1E-4'\n ds[wavenumberScales[v]].attrs['range_of_values'] = '[ ' + str(bwn) + ', ' + str(ewn) + ' ]'\n elif((ext == 'SUM') | (ext == 'sum')):\n for v in ['ResponsivitySpectralAveragesCh1', 'ResponsivitySpectralAveragesCh2', 'SkyVariabilityAveragesCh1', 'SkyVariabilityAveragesCh2', 'SkyRadianceSpectralAveragesCh1', 'SkyRadianceSpectralAveragesCh2']:\n bwn = dependentVariableRecords[v]['independentMinimum']\n ewn = dependentVariableRecords[v]['independentMaximum']\n nwn = int(dependentVariableRecords[v]['sizeDependentRecord'] / 4)\n wnum1 = np.linspace(bwn, ewn, nwn, dtype=np.float64)\n # Add the wavenumber scale as a variable to the xarray dataset.\n ds[wavenumberScales[v]] = wnum1.astype(np.float64)\n ds[wavenumberScales[v]].attrs['longname'] = 'Wavenumber in reciprocal centimeters'\n ds[wavenumberScales[v]].attrs['units'] = 'centimeter^-1'\n ds[wavenumberScales[v]].attrs['precision'] = '1E-4'\n ds[wavenumberScales[v]].attrs['range_of_values'] = '[ ' + str(bwn) + ', ' + str(ewn) + ' ]'\n else:\n print('ERROR: Incorrect file type. Try again...')\n return {}\n\n return\n\n # Opens the DMV file.\n f = open(filename, 'rb')\n\n # Determine the file size by searching for the end-of-file; eof.\n eof = f.seek(-1, 2) # go to the file end and record byte value\n\n # Determine header size, then skip to beginning of data records.\n f.seek(0)\n\n # Read the header.\n headerSize = int(f.readline().decode('utf-8'))\n f.seek(0)\n FileHistory = f.read(headerSize).decode('utf-8')\n\n # Decode dependent variables that are associated with the data in the particular file.\n ID = f.read(12).decode('utf-8')\n # f.seek(12,1) # Skip the 12-byte identifier, \"SSECRGD \".\n sizeTOC = np.fromfile(f, np.int32, 1)[0]\n dependentVariables, dependentVariableRecords = readTOC(sizeTOC)\n\n # Determine independent variables.\n variables, wavenumberScales = getDMVformat(filename)\n variables.update(dependentVariables) # Append dependent variables to list of variables\n\n # Read the next 4 bytes; not sure what these bytes are, but they aren't part of the data records.\n nbytes = np.fromfile(f, np.int32, 1)[0]\n np.fromfile(f, np.int32, nbytes) # Skip these bytes until I figure out what they represent...\n\n # Read data in as a float32 array; all RNC variables are float32.\n arr = np.fromfile(f, np.float32)\n f.close()\n\n # Determine file structure.\n fileStructure = DMVfileStructure(filename)\n\n # Decode the base_time from the filename.\n base_time = pd.to_datetime('20' + filename.split('/')[-1][0:2] + '-' + filename.split('/')[-1][2:4] + '-' + filename.split('/')[-1][4:6])\n Time = arr[fileStructure['variableOffset']::fileStructure['numberOfValues']]\n\n # Create a Pandas dataframe for all independent variables.\n df = pd.DataFrame({}, index=base_time + pd.to_timedelta(Time, unit='h'))\n df.index.name = 'time'\n for offset, variable in enumerate(variables):\n if (offset >= fileStructure['numberOfVariables']): break\n df[variable] = arr[fileStructure['variableOffset'] + offset::fileStructure['numberOfValues']]\n\n # Creates an xarray dataset from the Pandas dataframe.\n ds = xr.Dataset().from_dataframe(df)\n # Determines the wavenumbers scales and adds them to the xarray dataset.\n determineWavenumberScales(filename)\n\n # Add data for dependent variables.\n for variable, offset in zip(dependentVariables, fileStructure['dataOffset']):\n ds[variable] = xr.DataArray(np.array(\n [arr[int((record * fileStructure['recordSize'] / 4) + offset):int((record * fileStructure['recordSize'] / 4) + offset + len(ds[wavenumberScales[variable]]))] for record in range(fileStructure['numberOfRecords'])]),\n coords=[df.index, ds[wavenumberScales[variable]].data],\n dims=['time', wavenumberScales[variable]])\n # Global attributes\n ds['FileHistory'] = FileHistory\n # base_time\n ds['base_time'] = np.int32(\n (base_time - pd.to_datetime('1970-01-01') + pd.Timedelta(Time[0], unit='h')).total_seconds())\n ds['base_time'].attrs['longname'] = 'Base time in Epoch'\n ds['base_time'].attrs['date'] = df.index[0].strftime('%Y-%m-%d,%H:%M:%S GMT')\n # date\n ds['date'] = np.int32(filename.split('/')[-1][0:6])\n # time_offset\n ds['time_offset'] = np.array(\n [(pd.Timedelta(time, unit='h') - pd.Timedelta(Time[0], unit='h')).total_seconds() for time in Time])\n ds['time_offset'].attrs['longname'] = 'Time offset from base_time'\n\n # Adds attributes for each independent variable.\n for offset, variable in enumerate(variables):\n if (offset >= fileStructure['numberOfVariables']): break\n for attribute in variables[variable]:\n ds[variable].attrs[attribute] = variables[variable][attribute]\n\n # Adds attributes for each dependent variable.\n for variable in dependentVariables:\n for attribute in variables[variable]:\n ds[variable].attrs[attribute] = variables[variable][attribute]\n\n return ds",
"def do_it_all(path):\n df = pd.DataFrame(columns=['eyr', 'byr', 'iyr',\n 'hgt', 'hcl', 'ecl', 'pid', 'cid'])\n with open(path, 'r') as f:\n identities = f.read().split('\\n\\n')\n for idx in range(len(identities)):\n word = identities[idx].split('\\n')\n mydict = break_line(word)\n mydict = list_to_dict(':', mydict)\n temp_df = pd.DataFrame.from_dict({idx: mydict})\n temp_df = temp_df.T\n df = pd.concat([df, temp_df])\n return df",
"def load_data(fn):\n return pandas.read_csv(fn, dtype={'Name': str, 'Reason': str, 'Amount': float, 'Day': int})",
"def read(filename, replace_columns=True):\n f = open(filename)\n lines = f.readlines()\n f.close()\n\n # Extract column names from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Columns:'):\n columns = []\n odt_section = i # Should be removed after runs are split.\n for part in re.split('Oxs_|Anv_|Southampton_', line)[1:]:\n for char in [\"{\", \"}\", \" \", \"\\n\"]:\n part = part.replace(char, '')\n if replace_columns:\n if part in columns_dic.keys():\n columns.append(columns_dic[part])\n else:\n msg = \"Entry {} not in lookup table.\".format(part)\n raise ValueError(msg)\n else:\n columns.append(part)\n\n # Extract units from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Units:'):\n units = line.split()[2:]\n\n # Extract the data from the odt file.\n data = []\n for i, line in enumerate(lines[odt_section:]):\n if not line.startswith(\"#\"):\n data.append([float(number) for number in line.split()])\n\n df = pd.DataFrame(data, columns=columns)\n # next line is required to allow adding list-like attribute to pandas DataFrame\n # see https://github.com/pandas-dev/pandas/blob/2f9d4fbc7f289a48ed8b29f573675cd2e21b2c89/pandas/core/generic.py#L3631\n df._metadata.append('units')\n df.units = dict(zip(columns, units))\n return df"
] | [
"0.68054324",
"0.5834878",
"0.5798597",
"0.5793068",
"0.57881135",
"0.57818484",
"0.5780603",
"0.5767426",
"0.5762671",
"0.5683659",
"0.5658958",
"0.5644294",
"0.56357336",
"0.557491",
"0.55712473",
"0.55641025",
"0.55571115",
"0.5556157",
"0.5546592",
"0.55335677",
"0.55061173",
"0.55001163",
"0.5494561",
"0.5486566",
"0.54753184",
"0.54725814",
"0.5439703",
"0.54389447",
"0.54334396",
"0.54282403"
] | 0.7467837 | 0 |
Checks authorization of a rule against the target in this context. This function is not to be called directly. Calling the function with a target that evaluates to None may result in policy bypass. Use 'authorize_on_' calls instead. | def __authorize(context, rule, target=None):
target = target or {'tenant': context.tenant}
return get_enforcer().authorize(
rule, target, context.to_dict(), do_raise=True,
exc=trove_exceptions.PolicyNotAuthorized, action=rule) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wrap_check_policy(func):\n @functools.wraps(func)\n def wrapped(self, context, target_obj, *args, **kwargs):\n check_policy(context, func.__name__, target_obj)\n return func(self, context, target_obj, *args, **kwargs)\n\n return wrapped",
"def authorization_rule(self) -> Optional[pulumi.Input['EventhubSpecAuthorizationRuleArgs']]:\n return pulumi.get(self, \"authorization_rule\")",
"def check_access(permission):\n def validate(func, self, *args, **kwargs):\n if u'REMOTE_USER' in session:\n user = Session.query(Users).get(session[u'REMOTE_USER'])\n if user.has_access(permission):\n return func(self, *args, **kwargs)\n else:\n h.flash.set_message(u'You don\\'t have access to that area.', 'error')\n h.redirect(h.url('/'))\n #h.redirect_to(u'/')\n else:\n return func(self, *args, **kwargs)\n return decorator(validate)",
"def _enforce(self, req, action, target=None):\n if target is None:\n target = {}\n try:\n self.policy.enforce(req.context, action, target)\n except exception.Forbidden as e:\n LOG.debug(\"User not permitted to perform '%s' action\", action)\n raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)",
"def enforce(predicate, request, msg=None, denial_handler=None):\n if not_met(predicate, request):\n denial = _AuthorizationDenial(msg, denial_handler)\n raise denial",
"def acl_check_entity(self, entity, auth_context, op, obj):\n acl_check = (\n entity.acl_check(auth_context, op, obj)\n if entity.has_acl()\n else self.default_acl.acl_check(auth_context, op, obj))\n if not acl_check:\n raise exceptions.AclError(\n 'unauthorized change to %s' % (\n entity.name,))",
"def autz_required(permission, context=None):\n def decorator(func):\n\n @wraps(func)\n async def wrapper(*args):\n request = (args[-1].request\n if isinstance(args[-1], web.View)\n else args[-1])\n\n if await autz.permit(request, permission, context):\n return await func(*args)\n\n raise web.HTTPForbidden()\n\n return wrapper\n\n return decorator",
"def authorize(context, action, target, do_raise=True):\n init()\n credentials = context.to_policy_values()\n try:\n result = _ENFORCER.authorize(action, target, credentials,\n do_raise=do_raise, action=action)\n return result\n except policy.PolicyNotRegistered:\n LOG.exception('Policy not registered')\n raise\n except Exception:\n LOG.debug('Policy check for %(action)s failed with credentials '\n '%(credentials)s',\n {'action': action, 'credentials': credentials})\n raise",
"def authorize(context, action, target=None, do_raise=True, exc=None):\n init()\n if not exc:\n exc = exception.PolicyNotAuthorized\n\n # Legacy fallback for emtpy target from context.can()\n # should be removed once we improve testing and scope checks\n if target is None:\n target = default_target(context)\n\n try:\n result = _ENFORCER.authorize(action, target, context,\n do_raise=do_raise, exc=exc, action=action)\n except policy.PolicyNotRegistered:\n with excutils.save_and_reraise_exception():\n LOG.exception('Policy not registered')\n except policy.InvalidScope:\n LOG.debug('Policy check for %(action)s failed with scope check '\n '%(credentials)s',\n {'action': action,\n 'credentials': context.to_policy_values()})\n raise exc(action=action)\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.debug('Policy check for %(action)s failed with credentials '\n '%(credentials)s',\n {'action': action,\n 'credentials': context.to_policy_values()})\n return result",
"def check_acl(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if current_user.is_anonymous:\n abort(403)\n return func(*args, **kwargs)\n\n return decorated_view",
"def __call__(self, target, creds, enforcer):\n\n return creds['is_admin'] == self.expected",
"def __call__(self, target, creds, enforcer):\n\n return creds['is_admin'] == self.expected",
"def action_allowed_for(user, permission):\n if user is None or not user.is_authenticated:\n return False\n\n assert permission in amo.permissions.PERMISSIONS_LIST # constants only.\n return any(\n match_rules(group.rules, permission.app, permission.action)\n for group in user.groups_list\n )",
"def __call__(self, target, creds):\n\n return creds['is_admin'] == self.expected",
"def authorize(self, req):\n try:\n version, account, container, obj = split_path(req.path, 1, 4, True)\n except ValueError:\n return HTTPNotFound(request=req)\n if not account or not account.startswith(self.reseller_prefix):\n return self.denied_response(req)\n user_groups = (req.remote_user or '').split(',')\n if '.reseller_admin' in user_groups:\n return None\n if account in user_groups and \\\n (req.method not in ('DELETE', 'PUT') or container):\n # If the user is admin for the account and is not trying to do an\n # account DELETE or PUT...\n return None\n referrers, groups = parse_acl(getattr(req, 'acl', None))\n if referrer_allowed(req.referer, referrers):\n return None\n if not req.remote_user:\n return self.denied_response(req)\n for user_group in user_groups:\n if user_group in groups:\n return None\n return self.denied_response(req)",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return self.admin_id == author_id\n return False",
"def authorize(self, action, author_id=None):\n return False",
"def authorize(self, action, author_id=None):\n if Identity.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return author_id == self.author.id\n return False",
"def appliesTo(self, n):\n\n if self.accept_rules:\n accepted = any([predicate(n) for predicate in self.accept_rules])\n else:\n accepted = True\n\n denied = any([predicate(n) for predicate in self.deny_rules])\n\n return accepted and not denied",
"def evaluate(self, request_info: RequestInfo):\n rule_results = [(rule, rule.matches(request_info)) for rule in self.rules]\n\n overriding_blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.OVERRIDE\n ]\n overriding_allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.OVERRIDE\n ]\n\n if len(overriding_allowing_rules) > 0:\n return Action.ALLOW, overriding_allowing_rules\n\n if len(overriding_blocking_rules) > 0:\n return Action.DENY, overriding_blocking_rules\n\n blocking_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.DENY and result == MatchResult.MATCH\n ]\n allowing_rules = [\n rule for rule, result in rule_results\n if rule.action == Action.ALLOW and result == MatchResult.MATCH\n ]\n\n if len(allowing_rules) > 0:\n return Action.ALLOW, allowing_rules\n\n if len(blocking_rules) > 0:\n return Action.DENY, blocking_rules\n\n return Action.NOOP, None",
"def check(self,):\n self.is_valid_according_policy()",
"def authorize(\n context: PolicyContext, resource: str, operation: str, selector: str = \"\",\n) -> Scope:\n request = AccessRequest(\n resource=resource, operation=operation, selector=selector\n )\n scope, authorized = check_permission(context, request)\n if not authorized:\n raise NotEnoughPrivilegesErr(request)\n return scope",
"def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)",
"def check_authorized(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n if not self.sessions[addr].get(\"authorized\"):\n return Header.ERROR, Error.FORBIDDEN_REQUEST\n else:\n return f(self, addr, request)\n\n return wrapper",
"def authorization_check(input_inheritage_datum):\n\n legal_entity = Get(GetContext, input_inheritage_datum)\n\n legal_entity_is_authorized = CheckWitness(legal_entity) # Boolean\n\n if legal_entity_is_authorized:\n print('Authorization confirmed.')\n \n else:\n print('Authorization failed.')\n\n return legal_entity_is_authorized",
"def enforce_policy(self, method_name, request):\n context_dict = request.context.to_dict()\n if method_name in [\"detail\", \"get_all\", \"get_one\"]:\n policy.authorize(event_log_policy.POLICY_ROOT % \"get\", {},\n context_dict)\n else:\n raise exceptions.PolicyNotFound()",
"def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)",
"def cancan(self, f):\n return uncan(can(f), self.user_ns)"
] | [
"0.59729636",
"0.58791226",
"0.57168037",
"0.56584775",
"0.55937326",
"0.5568693",
"0.55458856",
"0.55406976",
"0.5506479",
"0.550026",
"0.5477174",
"0.5477174",
"0.5420243",
"0.53873324",
"0.5317387",
"0.5297428",
"0.5296398",
"0.52627957",
"0.52248955",
"0.5222644",
"0.520844",
"0.52020323",
"0.51992697",
"0.5170374",
"0.5166542",
"0.5155868",
"0.5148708",
"0.5133476",
"0.512313",
"0.5112348"
] | 0.7202618 | 0 |
'To assume as true in the absence of proof to the contrary.' Returns a modified transaction with this value set if the value of the item is not already known. If a value has already been fetched or presumed, this will be a noop. If modified, the presumed value will be available via `get`, and will additionally check your presumed value against the table when the transaction is run. At runtime (within the context of `versioned_transact_write_items`), this is purely a cost optimization to avoid fetching an item for which you believe you already know the value. Whether your presumption is right, wrong, or you don't presume anything, your transaction builder will result in exactly the same data written to the table. As with any item fetched or presumed, if the item turns out to have a different value in the table than you presumed when the transaction is committed, the transaction will restart, the item will be freshly fetched like usual, and your use of presumption will have no ultimate effect on the data. For unit testing, this is the approved approach for setting up a VersionedTransaction 'fixture', where you declare the state of the database before your transaction builder is run. Set a presumed value for every item that you will attempt to `get` or `require` within your transaction builder, otherwise your test will error with ItemUndefinedException. | def presume(
transaction: VersionedTransaction,
table: TableNameOrResource,
item_key: ItemKey,
item_value: Optional[Item],
) -> VersionedTransaction:
if item_value is not None:
for key_attr, key_val in item_key.items():
assert item_value[key_attr] == key_val, "Item key must match in a non-nil item value"
table_name = _table_name(table)
if table_name in transaction.tables:
table_data = transaction.tables[table_name]
else:
table_data = _TableData(
items=dict(), effects=dict(), key_attributes=standard_key_attributes(*item_key.keys())
)
hkey = hashable_key(item_key)
if hkey not in table_data.items:
return VersionedTransaction(
tables={
**transaction.tables,
table_name: _TableData(
items={**table_data.items, hkey: item_value},
effects=table_data.effects,
key_attributes=table_data.key_attributes,
),
}
)
return transaction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_batch_get_lazy_load():\n t = VersionedTransaction(dict())\n table_a = ItemTable(\"a\")\n table_b = ItemTable(\"b\")\n\n a1_k = dict(id=\"a1\")\n a2_k = dict(id=\"a2\")\n b1_k = dict(id=\"b1\")\n\n a3_k = dict(id=\"a3\")\n\n def triple_get(t: VersionedTransaction) -> VersionedTransaction:\n a1 = table_a.get(a1_k)(t)\n b1 = table_b.get(b1_k)(t)\n a2 = table_a.get(a2_k)(t)\n # all three of the above gets will be performed\n # together as a single call to batch_get.\n return table_a.put(dict(a3_k, items=[a1, b1, a2]))(t)\n\n calls = 0\n a1 = dict(a1_k, i=6)\n a2 = dict(a2_k, i=8)\n b1 = dict(b1_k, j=4)\n\n def batch_get(item_keys_by_table_name):\n if not item_keys_by_table_name:\n return dict()\n nonlocal calls\n calls += 1\n return dict(a=[a1, a2], b=[b1])\n\n t = versioned_transact_write_items(\n triple_get,\n batch_get_item=batch_get, # type: ignore\n transact_write_items=lambda **_kw: None,\n )\n\n assert calls == 1\n assert table_a.require(a3_k)(t)[\"items\"] == [a1, b1, a2]",
"def test_preserve_changes(self):\n\n mapper(Order, orders, properties = {\n 'userident':deferred(orders.c.user_id, group='primary'),\n 'description':deferred(orders.c.description, group='primary'),\n 'opened':deferred(orders.c.isopen, group='primary')\n })\n sess = create_session()\n o = sess.query(Order).get(3)\n assert 'userident' not in o.__dict__\n o.description = 'somenewdescription'\n assert o.description == 'somenewdescription'\n def go():\n assert o.opened == 1\n self.assert_sql_count(testing.db, go, 1)\n assert o.description == 'somenewdescription'\n assert o in sess.dirty",
"def test_vault_update_vault_item(self):\n pass",
"def mock_transact_write_items(self):\n\n def put_item(item):\n name = item[\"TableName\"]\n record = item[\"Item\"]\n return self.dynamodb_backend.put_item(name, record)\n\n def delete_item(item):\n name = item[\"TableName\"]\n keys = item[\"Key\"]\n return self.dynamodb_backend.delete_item(name, keys)\n\n def update_item(item):\n name = item[\"TableName\"]\n key = item[\"Key\"]\n update_expression = item.get(\"UpdateExpression\")\n attribute_updates = item.get(\"AttributeUpdates\")\n expression_attribute_names = item.get(\"ExpressionAttributeNames\", {})\n expression_attribute_values = item.get(\"ExpressionAttributeValues\", {})\n return self.dynamodb_backend.update_item(\n name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values,\n )\n\n transact_items = self.body[\"TransactItems\"]\n\n for transact_item in transact_items:\n if \"Put\" in transact_item:\n put_item(transact_item[\"Put\"])\n elif \"Update\" in transact_item:\n update_item(transact_item[\"Update\"])\n elif \"Delete\" in transact_item:\n delete_item(transact_item[\"Delete\"])\n\n return dynamo_json_dump({})",
"def test_version_upgrade_nonpersistent(self):\n\n db_file = self.mktemp()\n\n db = Database.TestDB(db_file)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBRecreateUpgrade(db_file)\n yield self.inlineCallbackRaises(Database.TestDBRecreateUpgrade.RecreateDBException, db.open)\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ())\n db.close()",
"def test_version_upgrade_persistent(self):\n db_file = self.mktemp()\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBRecreateUpgrade(db_file, persistent=True)\n yield self.inlineCallbackRaises(NotImplementedError, db.open)\n self.assertTrue(os.path.exists(db_file))\n db.close()\n db = None\n\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()",
"def put_item(key, value):\n try:\n response = table.put_item( Item={ 'my-key': key, 'some-other-key': value )\n print(f\"Successfully added new item\")\n print(f\"Response : {response}\")\n except ClientError as ce:\n print(f\"Failed to creat new item - key : {key}, value : {value}\")\n print(ce)\n\ndef update_nested_item(key, value):\n \"\"\"\n Update a nested item. create \n \"\"\"\n try:\n response = table.update_item( Key={ 'my-key': key },\n UpdateExpression='SET #other-key = :new_value',\n ExpressionAttributeNames={\n '#other-key': 'New-Key'\n },\n ExpressionAttributeValues={ ':new_value': True },\n ReturnValues='ALL_NEW'\n )\n print(\"Successfully created/updated item.\")\n print(f\"Response : {response}\")\n except ClientError as ce:\n print(f\"Failed to update item : {ce}\")",
"def test_update_item(self, default_ms):\r\n self.initdb(default_ms)\r\n course = self.store.get_course(self.course_locations[self.XML_COURSEID1].course_key)\r\n # if following raised, then the test is really a noop, change it\r\n self.assertFalse(course.show_calculator, \"Default changed making test meaningless\")\r\n course.show_calculator = True\r\n with self.assertRaises(AttributeError): # ensure it doesn't allow writing\r\n self.store.update_item(course, None)\r\n # now do it for a r/w db\r\n course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)\r\n # if following raised, then the test is really a noop, change it\r\n self.assertFalse(course.show_calculator, \"Default changed making test meaningless\")\r\n course.show_calculator = True\r\n self.store.update_item(course, None)\r\n course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)\r\n self.assertTrue(course.show_calculator)",
"def test_readwrite(self):\n db = Database.TestDB(self.mktemp())\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n items = (yield db.queryList(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, (\"FOO\",))\n db.close()",
"def transaction(self):\n copy = self.copy()\n try:\n yield copy\n except TransactionRollback:\n del copy\n else:\n self.update(copy)",
"def txn(db):\n with db.atomic() as txn:\n yield\n txn.rollback()",
"def test_transaction_update(self):\n currency_endowment = {\"FET\": 100}\n good_endowment = {\"good_id\": 20}\n\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n assert self.ownership_state.amount_by_currency_id == currency_endowment\n assert self.ownership_state.quantities_by_good_id == good_endowment\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=5,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n self.ownership_state._update(tx_message=tx_message)\n expected_amount_by_currency_id = {\"FET\": 75}\n expected_quantities_by_good_id = {\"good_id\": 30}\n assert (\n self.ownership_state.amount_by_currency_id == expected_amount_by_currency_id\n )\n assert (\n self.ownership_state.quantities_by_good_id == expected_quantities_by_good_id\n )",
"def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)",
"def versioned_diffed_update_item(\n table: TableResource,\n item_transformer: ItemTransformer,\n item_key: ItemKey = None,\n *,\n get_item: ItemGetter = strongly_consistent_get_item,\n update_item: ItemUpdater = UpdateOrCreateItem,\n max_attempts_before_failure: int = DEFAULT_MAX_ATTEMPTS_BEFORE_FAILURE,\n item_version_key: str = \"item_version\",\n last_written_key: str = \"last_written_at\",\n random_sleep_on_lost_race: bool = True,\n prewrite_transform: ty.Optional[SimpleTransform] = _DEFAULT_PREDIFF_TRANSFORM,\n item_id: ItemKey = None, # deprecated name, present for backward-compatibility\n nicename: str = DEFAULT_ITEM_NAME,\n) -> Item:\n item_key = item_key or item_id\n assert item_key, \"Must pass item_key or (deprecated) item_id\"\n\n attempt = 0\n max_attempts_before_failure = int(max(1, max_attempts_before_failure))\n update_arguments = None\n\n nice_get_item = _nicename_getter(nicename, get_item)\n\n while attempt < max_attempts_before_failure:\n attempt += 1\n item = nice_get_item(table, item_key)\n cur_item_version = item.get(item_version_key, 0)\n\n logger.debug(f\"Current item version is {cur_item_version}\")\n\n # do the incremental update\n updated_item = item_transformer(copy.deepcopy(item))\n if not updated_item:\n logger.debug(f\"No transformed {nicename} was returned; returning original {nicename}\")\n return item\n assert updated_item is not None\n item_diff = build_update_diff(item, updated_item, prediff_transform=prewrite_transform)\n if not item_diff:\n logger.info(\n f\"Transformed {nicename} was returned but no meaningful difference was found.\",\n extra=dict(json=dict(item=item, updated_item=updated_item)),\n )\n return item\n\n # set incremented item_version and last_written_at on the item_diff\n # and the updated_item - the former will be sent to DynamoDB, the latter\n # returned to the user.\n item_diff[item_version_key] = int(cur_item_version) + 1\n item_diff[last_written_key] = iso8601strict(datetime.utcnow())\n updated_item[item_version_key] = item_diff[item_version_key]\n updated_item[last_written_key] = item_diff[last_written_key]\n\n try:\n # write if no intervening updates\n expr = versioned_item_expression(\n cur_item_version,\n item_version_key,\n id_that_exists=next(iter(item_key.keys())) if item else \"\",\n )\n logger.debug(expr)\n update_arguments = select_attributes_for_set_and_remove(item_diff)\n # store arguments for later logging\n update_item(table, item_key, **update_arguments, **expr)\n return updated_item\n except ClientError as ce:\n if is_conditional_update_retryable(ce):\n msg = (\n \"Attempt %d to update %s in table %s was beaten \"\n + \"by a different update. Sleeping for %s seconds.\"\n )\n sleep = 0.0\n if random_sleep_on_lost_race:\n sleep = random.uniform(MIN_TRANSACTION_SLEEP, MAX_TRANSACTION_SLEEP)\n time.sleep(sleep)\n logger.warning(\n msg,\n attempt,\n nicename,\n table.name,\n f\"{sleep:.3f}\",\n extra=dict(\n json=dict(item_key=item_key, item_diff=item_diff, ce=str(ce), sleep=sleep,)\n ),\n )\n else:\n raise\n raise get_item_exception_type(nicename, VersionedUpdateFailure)(\n f\"Failed to update {nicename} without performing overwrite {item_key}. \"\n f\"Was beaten to the update {attempt} times.\",\n key=item_key,\n table_name=table.name,\n update_arguments=update_arguments,\n )",
"def test_set_item_from_outside(self):\n\n expected = {\n self.file_to_test: {\n \"hello.world\": {\n \"included_at_epoch\": 190.0,\n \"included_at_iso\": \"1970-01-01T01:03:10\",\n \"last_retested_at_epoch\": 190.0,\n \"last_retested_at_iso\": \"1970-01-01T01:03:10\",\n \"status\": PyFunceble.STATUS.official.invalid,\n },\n \"world.hello\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.down,\n },\n },\n }\n\n self.inactive_db.database = {\n self.file_to_test: {\n \"world.hello\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.down,\n },\n },\n }\n\n self.inactive_db[\"hello.world\"] = {\n \"included_at_epoch\": 190.0,\n \"included_at_iso\": \"1970-01-01T01:03:10\",\n \"last_retested_at_epoch\": 190.0,\n \"last_retested_at_iso\": \"1970-01-01T01:03:10\",\n \"status\": PyFunceble.STATUS.official.invalid,\n }\n\n self.assertEqual(expected, self.inactive_db.database)",
"def test_add_value_singlevalue_singlevalue(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n il.add_value(\"name\", \"bar\")\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"bar\"]})",
"def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)",
"def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.Set(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement({1, 2, 3, 4}, None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == {1, 2, 3, 4}\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])",
"def update_item(self, table, item):",
"def test_update_single_row_if_status_is_in_progress(self):\n first = generate_mock_result(status='IN_PROGRESS', success=False)\n self.db.insert_single_result(first)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'IN_PROGRESS')\n second = generate_mock_result(status='SUCCESS', success=True)\n self.db.insert_single_result(second)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'SUCCESS')",
"def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)",
"def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked",
"def test_add_or_update_state_for_none_state_key(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n has_value, val = _run(state_manager.try_get_state('state1'))\n self.assertTrue(has_value)\n self.assertEqual('value1', val)\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)",
"def test_create_continue_version(self):\r\n # start transaction w/ simple creation\r\n user = random.getrandbits(32)\r\n new_course = modulestore().create_course('test_org', 'test_transaction', user)\r\n new_course_locator = new_course.id\r\n index_history_info = modulestore().get_course_history_info(new_course.location)\r\n course_block_prev_version = new_course.previous_version\r\n course_block_update_version = new_course.update_version\r\n self.assertIsNotNone(new_course_locator.version_guid, \"Want to test a definite version\")\r\n versionless_course_locator = new_course_locator.version_agnostic()\r\n\r\n # positive simple case: no force, add chapter\r\n new_ele = modulestore().create_item(\r\n new_course.location, 'chapter', user,\r\n fields={'display_name': 'chapter 1'},\r\n continue_version=True\r\n )\r\n # version info shouldn't change\r\n self.assertEqual(new_ele.update_version, course_block_update_version)\r\n self.assertEqual(new_ele.update_version, new_ele.location.version_guid)\r\n refetch_course = modulestore().get_course(versionless_course_locator)\r\n self.assertEqual(refetch_course.location.version_guid, new_course.location.version_guid)\r\n self.assertEqual(refetch_course.previous_version, course_block_prev_version)\r\n self.assertEqual(refetch_course.update_version, course_block_update_version)\r\n refetch_index_history_info = modulestore().get_course_history_info(refetch_course.location)\r\n self.assertEqual(refetch_index_history_info, index_history_info)\r\n self.assertIn(new_ele.location.version_agnostic(), version_agnostic(refetch_course.children))\r\n\r\n # try to create existing item\r\n with self.assertRaises(DuplicateItemError):\r\n _fail = modulestore().create_item(\r\n new_course.location, 'chapter', user,\r\n block_id=new_ele.location.block_id,\r\n fields={'display_name': 'chapter 2'},\r\n continue_version=True\r\n )\r\n\r\n # start a new transaction\r\n new_ele = modulestore().create_item(\r\n new_course.location, 'chapter', user,\r\n fields={'display_name': 'chapter 2'},\r\n continue_version=False\r\n )\r\n transaction_guid = new_ele.location.version_guid\r\n # ensure force w/ continue gives exception\r\n with self.assertRaises(VersionConflictError):\r\n _fail = modulestore().create_item(\r\n new_course.location, 'chapter', user,\r\n fields={'display_name': 'chapter 2'},\r\n force=True, continue_version=True\r\n )\r\n\r\n # ensure trying to continue the old one gives exception\r\n with self.assertRaises(VersionConflictError):\r\n _fail = modulestore().create_item(\r\n new_course.location, 'chapter', user,\r\n fields={'display_name': 'chapter 3'},\r\n continue_version=True\r\n )\r\n\r\n # add new child to old parent in continued (leave off version_guid)\r\n course_module_locator = new_course.location.version_agnostic()\r\n new_ele = modulestore().create_item(\r\n course_module_locator, 'chapter', user,\r\n fields={'display_name': 'chapter 4'},\r\n continue_version=True\r\n )\r\n self.assertNotEqual(new_ele.update_version, course_block_update_version)\r\n self.assertEqual(new_ele.location.version_guid, transaction_guid)\r\n\r\n # check children, previous_version\r\n refetch_course = modulestore().get_course(versionless_course_locator)\r\n self.assertIn(new_ele.location.version_agnostic(), version_agnostic(refetch_course.children))\r\n self.assertEqual(refetch_course.previous_version, course_block_update_version)\r\n self.assertEqual(refetch_course.update_version, transaction_guid)",
"def atomic(self, savepoint=True):\n return TransactionContext(*self.values(), savepoint=True)",
"def test_update(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"async def test_transaction_commit_low_level(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n transaction = await database.transaction()\n try:\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n except: # pragma: no cover\n await transaction.rollback()\n else:\n await transaction.commit()\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1",
"def test_contains_revision(self):\n\n # Note that query logic is tested separately by integration tests. This\n # test just checks that the function maps inputs to outputs as expected.\n\n mock_connection = MagicMock()\n mock_cursor = mock_connection.cursor()\n database = Database(mock_connection)\n\n with self.subTest(name='new revision'):\n mock_cursor.__iter__.return_value = [(0,)]\n\n result = database.contains_revision(sentinel.revision)\n\n # compare with boolean literal to test the type cast\n self.assertIs(result, False)\n query_values = mock_cursor.execute.call_args[0][-1]\n self.assertEqual(query_values, (sentinel.revision,))\n\n with self.subTest(name='old revision'):\n mock_cursor.__iter__.return_value = [(1,)]\n\n result = database.contains_revision(sentinel.revision)\n\n # compare with boolean literal to test the type cast\n self.assertIs(result, True)\n query_values = mock_cursor.execute.call_args[0][-1]\n self.assertEqual(query_values, (sentinel.revision,))",
"def test_transition(self):\n # Make sure we push the upgraded items out of cache\n gc.collect()\n\n self.assertEqual(self.store.getItemByID(1).attribute, 'one')\n self.assertEqual(\n self.store.findUnique(Dummy, Dummy.attribute == 'two').storeID,\n 2)\n self.assertRaises(ItemNotFound, self.store.getItemByID, 3)\n i2 = self.store.getItemByID(4)\n self.assertEqual(i2.attribute, 'four')\n self.assertIsInstance(i2, Dummy2)"
] | [
"0.5932803",
"0.5802937",
"0.5779788",
"0.5756236",
"0.5724148",
"0.56774515",
"0.5541173",
"0.55110294",
"0.5491138",
"0.5489476",
"0.53459036",
"0.5339431",
"0.5324479",
"0.52662617",
"0.522534",
"0.5209724",
"0.520189",
"0.51962876",
"0.5192975",
"0.51924634",
"0.5159292",
"0.51563084",
"0.5142209",
"0.514095",
"0.51321256",
"0.5130111",
"0.51188",
"0.5115734",
"0.5111174",
"0.51057565"
] | 0.6724734 | 0 |
Idempotent definition of key attribute schema for the given table without forcing any IO operations/effects up front. The main reason you might want to do this is if you need to do a `put`, because `put` cannot infer the shape of your key. If the table definition is already present, this is a noop. | def define_table(
transaction: VersionedTransaction, table: TableNameOrResource, *key_attributes: str,
) -> VersionedTransaction:
assert len(key_attributes) > 0 and len(key_attributes) <= 2
if _table_name(table) in transaction.tables:
return transaction
return VersionedTransaction(
tables={
**transaction.tables,
_table_name(table): _TableData(
items=dict(),
effects=dict(),
key_attributes=standard_key_attributes(*key_attributes),
),
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_table_schema(table_desc, table_name, schema):\n table_desc['TableName'] = table_name\n table_desc['AttributeDefinitions'] = [{\n 'AttributeName': item['name'],\n 'AttributeType': DynamoStubber._encode_type(item['type'])\n } for item in schema]\n table_desc['KeySchema'] = [{\n 'AttributeName': item['name'],\n 'KeyType': item['key_type']\n } for item in schema]",
"def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }",
"def make_schema_key(schema):\n # @TODO make this use schematics modifiers\n if not isinstance(schema, BaseModel):\n raise TypeError(\"can only make a schema key based on a BaseModel instance.\")\n modifiers = []\n for modifier in MODIFIERS:\n if hasattr(schema, modifier):\n attribute = getattr(schema, modifier)\n try:\n hash(attribute)\n except TypeError:\n attribute = tuple(attribute)\n modifiers.append(attribute)\n\n\n else:\n modifiers.append((modifier, None))\n\n return SchemaKey(schema.__class__, *modifiers)",
"def create_table(self,\n key_schema_definition=__DEFAULT_KEY_SCHEMA,\n attribute_definitions=__DEFAULT_ATTRIBUTE_DEFINITIONS,\n provisioned_throughput=__DEFAULT_PROVISIONED_THROUGHPUT):\n table_name = self.generate_name()\n try:\n table = self.dynamodb.create_table(\n TableName=table_name,\n KeySchema=key_schema_definition,\n AttributeDefinitions=attribute_definitions,\n ProvisionedThroughput=provisioned_throughput)\n except Exception as e:\n raise RuntimeError('DynamoDB could not create table: %s' % e)\n while table.table_status == 'CREATING':\n time.sleep(0.01)\n table = self.dynamodb.Table(table_name)\n self.table_name, self.table = table_name, table",
"def add_annotation_table(database, table_name, key_table, fk_id):\n\n # Connecting to the database file\n conn = sqlite3.connect(database)\n c = conn.cursor()\n\n # Add table\n if key_table == \"exon\":\n fk_statement = \"\"\n else:\n fk_statement = \", FOREIGN KEY (ID) REFERENCES \"+ key_table + \"(\" + fk_id + \")\"\n command = \" CREATE TABLE IF NOT EXISTS \" + table_name + \\\n \"\"\" (ID INTEGER,\n annot_name text,\n source text,\n attribute text,\n value text,\n \n PRIMARY KEY (ID, source, attribute)\"\"\" + fk_statement + \"\"\"); \"\"\"\n c.execute(command)\n conn.commit()\n conn.close()\n return",
"def for_table(cls, table_doc):\n model_doc = {\n 'schemas': {\n table_doc['schema_name']: {\n 'tables': {\n table_doc['table_name']: table_doc\n }\n }\n }\n }\n return cls(model_doc)",
"def ensure_schema(client, table_name):\n query = ''.join([\n 'CREATE TABLE {cf} ',\n '(\"lockId\" ascii, \"claimId\" timeuuid, PRIMARY KEY(\"lockId\", \"claimId\"));'])\n\n def errback(failure):\n failure.trap(InvalidRequestException)\n\n return client.execute(query.format(cf=table_name),\n {}, ConsistencyLevel.QUORUM).addErrback(errback)",
"def htable_put(table, key, value):",
"def __init__(self, table_name, key_name) -> None:\n if not DB_ENDPOINT:\n self.client = boto3.resource(\"dynamodb\")\n else:\n self.client = boto3.resource(\"dynamodb\", endpoint_url=DB_ENDPOINT)\n # for testing -> was not working with env variable for some reason.\n # need to investigate further\n # self.client = boto3.resource(\"dynamodb\", endpoint_url=\"http://localhost:8000\")\n self.table_name = table_name\n self.table_connector = self.client.Table(self.table_name)\n self.primary_key = key_name",
"def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))",
"def test_table_definition(self):\n create_table(LowercaseKeyModel)\n create_table(CapitalizedKeyModel)\n\n delete_table(LowercaseKeyModel)\n delete_table(CapitalizedKeyModel)",
"def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'",
"def __init__(self, table_name='casbin_rule', **kwargs):\n self.table_name = table_name\n self.dynamodb = boto3.client('dynamodb', **kwargs)\n try:\n\n self.dynamodb.create_table(\n TableName=self.table_name,\n\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n except self.dynamodb.exceptions.ResourceInUseException:\n pass",
"def test_table_definition(self):\r\n create_table(LowercaseKeyModel)\r\n create_table(CapitalizedKeyModel)\r\n\r\n delete_table(LowercaseKeyModel)\r\n delete_table(CapitalizedKeyModel)",
"def make_cache_table(metadata, table_name='beaker_cache', schema_name=None):\n return sa.Table(table_name, metadata,\n sa.Column('namespace', sa.String(255), primary_key=True),\n sa.Column('accessed', sa.DateTime, nullable=False),\n sa.Column('created', sa.DateTime, nullable=False),\n sa.Column('data', sa.PickleType, nullable=False),\n schema=schema_name if schema_name else metadata.schema)",
"def __init__(self, region_name=None,\n key_schema_definition=__DEFAULT_KEY_SCHEMA,\n attribute_definitions=__DEFAULT_ATTRIBUTE_DEFINITIONS,\n provisioned_throughput=__DEFAULT_PROVISIONED_THROUGHPUT):\n self.table = None\n self.table_name = None\n self.dynamodb = boto3.resource('dynamodb', region_name=region_name)\n self.key_schema_definition = key_schema_definition\n self.attribute_definitions = attribute_definitions\n self.provisioned_throughput = provisioned_throughput",
"def create_table(self, schema: str, table: str, col_types: dict, non_null_columns: List[str]):\n return",
"def create_table(table_name: str, **db_kwargs) -> str:\n\n # Check if the table exists.\n try:\n client = boto3.client(\"dynamodb\", **db_kwargs)\n resp = client.describe_table(TableName=table_name)\n log.info(\"Table %s exists\", table_name)\n return resp[\"Table\"][\"TableArn\"]\n except Exception as err: # pylint:disable=broad-except\n pass\n\n # Key attributes in the table.\n attrs = [\n {\"AttributeName\": \"Target\", \"AttributeType\": \"S\"},\n {\"AttributeName\": \"PrimaryRangeKey\", \"AttributeType\": \"S\"},\n {\"AttributeName\": \"TargetIDKeys\", \"AttributeType\": \"S\"},\n ]\n\n key_schema = [\n {\"AttributeName\": \"Target\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"PrimaryRangeKey\", \"KeyType\": \"RANGE\"},\n ]\n\n global_secondary_indexes = [\n {\n \"IndexName\": \"TargetIDKeysIndex\",\n \"KeySchema\": [\n {\"AttributeName\": \"TargetIDKeys\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"PrimaryRangeKey\", \"KeyType\": \"RANGE\"},\n ],\n \"Projection\": {\"ProjectionType\": \"ALL\"},\n \"ProvisionedThroughput\": {\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 10},\n }\n ]\n\n try:\n client = boto3.client(\"dynamodb\", **db_kwargs)\n resp = client.create_table(\n TableName=table_name,\n AttributeDefinitions=attrs,\n KeySchema=key_schema,\n GlobalSecondaryIndexes=global_secondary_indexes,\n ProvisionedThroughput={\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 10},\n )\n log.info(\"Table %s created successfully\", table_name)\n return resp[\"TableDescription\"][\"TableArn\"]\n except Exception as err: # pylint:disable=broad-except\n raise RuntimeError(\"Error creating table %s: %s\" % (table_name, str(err)))",
"def test_build_base_attribute_key_function_with_table_name_succeeds_with_valid_input(self):\n instrument_id = 4\n attribute_name = \"attribute_string\"\n table_name = \"table_string\"\n expected_key = \"instruments:4:table_string:attribute_string\"\n result = redis_interface.RedisInterface._build_base_attribute_key(instrument_id, attribute_name,\n table_name=table_name)\n\n self.assertEqual(result, expected_key, \"Built key '%s' does not match expected string '%s'.\"\n % (result, expected_key))",
"def get_table_key(row: Dict[str, Any]) -> Union[TableKey, None]:\n if row:\n return TableKey(schema=row['schema'], table_name=row['name'])\n\n return None",
"def primary_key(table_name: str) -> str:\n\n return f\"\"\"\n SELECT\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type\n FROM\n pg_index i\n JOIN\n pg_attribute a\n ON\n a.attrelid = i.indrelid AND\n a.attnum = ANY(i.indkey)\n WHERE\n i.indrelid = '{table_name}'::regclass AND\n i.indisprimary\n \"\"\"",
"def _get_table_key(self, row: Dict[str, Any]) -> Union[TableKey, None]:\n if row:\n return TableKey(schema=row['schema'], table_name=row['name'])\n\n return None",
"def _get_sql_fkeys(self, table_attr):\n default_on_def = 'RESTRICT DEFERRABLE INITIALLY IMMEDIATE'\n fkey_template = 'ALTER TABLE \"%s\" ADD CONSTRAINT %s_%s_fkey FOREIGN KEY (%s) REFERENCES %s ON DELETE %s;'\n # index_template = 'CREATE INDEX %s_%s_idx ON %s (%s);'\n fkeys = ''\n\n for col_name, col_attrs in table_attr['columns'].iteritems():\n if col_attrs['reference']:\n fkeys += '\\n' + fkey_template % \\\n (table_attr['name'], table_attr['name'], col_attrs['name'], col_attrs['name'],\n col_attrs['reference'], col_attrs['on_delete'] if 'on_delete' in col_attrs else default_on_def)\n # fkeys += '\\n' + index_template % (table_attr['name'], col_attrs['name'], table_attr['name'], col_attrs['name'])\n\n return fkeys",
"def _create_table_if_not_exists(self) -> None:\n COLUMN_DEFINITIONS = 'definitions'\n COLUMN_TYPE = 'type'\n\n KEY_REF = '$ref'\n\n TYPE_LOOKUP = {\n 'string': 'VARCHAR(255)',\n 'integer': 'INTEGER',\n 'boolean': 'BOOLEAN',\n 'number': 'INTEGER',\n }\n\n def ref_lookup(\n property: Dict[str, Any], fields: Dict[str, Any]\n ) -> Dict[str, Any]:\n ref = property[KEY_REF]\n property_lookup_name = ref[ref.rfind('/') + 1 :]\n return fields[COLUMN_DEFINITIONS][property_lookup_name]\n\n field_queries = []\n fields = json.loads(self.schema.schema_json())\n\n del fields[Keywords.Properties.value][\n Keywords.ID.value\n ] # Remove primary key field. It is handled with auto increment below.\n\n for property_name, property in fields[Keywords.Properties.value].items():\n if KEY_REF in property:\n property = ref_lookup(property, fields)\n field_queries.append(\n f'{property_name} {TYPE_LOOKUP[property[COLUMN_TYPE]]}'\n )\n table_columns = ', '.join(field_queries)\n\n with connect(**BaseModel.db_settings) as connection:\n cursor = connection.cursor()\n cursor.execute(\n f'CREATE TABLE IF NOT EXISTS {self.table_name} (ID INTEGER PRIMARY KEY AUTO_INCREMENT, {table_columns})'\n )\n self._table_created[self.table_name] = True",
"def key(nullable=True):\n return sa.Column(\n \"key\",\n sa.Text().with_variant(mysql.VARCHAR(255), \"mysql\"),\n nullable=nullable,\n )",
"def test_autofield_add_primary_key(self):\n sql = \"\"\"\n CREATE TABLE address_no_primary_key\n (\n id serial NOT NULL,\n address character varying(255),\n geometry geometry(Point,4326)\n );\n \"\"\"\n cursor = self.conn.get_connection().cursor()\n cursor.execute(sql)\n\n layer = DataBaseLayer()\n layer.db_connection = self.conn\n layer.name = 'address_no_primary_key'\n layer.table = 'address_no_primary_key'\n layer.pk_field = 'id'\n layer.geom_field = 'geometry'\n layer.anonymous_view = True\n layer.anonymous_add = True\n layer.anonymous_update = True\n layer.anonymous_delete = True\n layer.save()\n\n with ModelFactory(layer) as Model:\n primary_key = None\n for f in Model._meta.fields:\n if getattr(f, 'primary_key', None):\n primary_key = f.name\n break\n self.assertEqual(primary_key, 'id')",
"def get_table_attributes(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n table_attributes = dict(primary_attributes=[], secondary_attributes=[])\n for attribute_name, attribute_info in getattr(schema_virtual_module,\n table_name).heading.attributes.items():\n if attribute_info.in_key:\n table_attributes['primary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n else:\n table_attributes['secondary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n\n return table_attributes",
"def test_build_base_attribute_key_function_without_table_name_succeeds_with_valid_input(self):\n instrument_id = 4\n attribute_name = \"attribute_string\"\n expected_key = \"instruments:4:attribute_string\"\n result = redis_interface.RedisInterface._build_base_attribute_key(instrument_id, attribute_name)\n\n self.assertEqual(result, expected_key, \"Built key '%s' does not match expected string '%s'.\"\n % (result, expected_key))",
"def set_primary_key(self, table, field):\n self._check_field(table, field, exists=True)\n\n field_meta = self.get_fields(table).get(field)\n field_subtype = self._get_key_subtype(field_meta)\n\n table_meta = self._metadata['tables'][table]\n table_meta['fields'][field] = {\n 'type': 'id',\n 'subtype': field_subtype\n }\n table_meta['primary_key'] = field",
"def index_schema_builder(table):\n conn = table.parent.parent.connection\n\n idx = OrderedDict()\n indexes = conn.execute(\"SHOW INDEXES FROM `%s`.`%s`\" % (table.parent.name, table.name))\n\n if not indexes:\n return idx\n\n for index in indexes:\n n = index['Key_name']\n if n not in idx:\n indexitem = IndexSchema(name=n, parent=table)\n indexitem.non_unique = (bool(index['Non_unique'])) # == not unique\n indexitem.table_name = index['Table']\n\n key_type = index['Index_type'].upper()\n\n if index['Key_name'].upper() == \"PRIMARY\":\n indexitem.kind = \"PRIMARY\"\n elif not indexitem.non_unique:\n indexitem.kind = \"UNIQUE\"\n elif key_type in ('FULLTEXT', 'SPATIAL'):\n indexitem.kind = key_type\n else:\n indexitem.kind = \"INDEX\"\n\n if key_type in ('BTREE', 'HASH', 'RTREE'):\n indexitem.type = key_type\n\n indexitem.collation = index['Collation']\n indexitem.comment = index['Comment']\n\n idx[n] = indexitem\n\n if index['Column_name'] not in idx[n].fields:\n idx[n].fields.insert(index['Seq_in_index'], (index['Column_name'], index['Sub_part'] or 0))\n\n return idx"
] | [
"0.60570586",
"0.5902038",
"0.5678898",
"0.5671591",
"0.5581425",
"0.55520034",
"0.5543876",
"0.55083936",
"0.53274363",
"0.53022087",
"0.52965194",
"0.5224999",
"0.52233547",
"0.5223007",
"0.5222796",
"0.52002573",
"0.51673585",
"0.5139907",
"0.5126423",
"0.51261",
"0.5116057",
"0.5109578",
"0.5108711",
"0.5091572",
"0.5054159",
"0.50330794",
"0.5019969",
"0.49757564",
"0.4932078",
"0.4925102"
] | 0.6186573 | 0 |
Given a relpath like drake/pkg/res.txt or external/repo/pkg/res.txt, find the data file and return its path | def find_data(relpath):
# Because we are in a py_binary, Bazel's wrapper script sets up our
# $PYTHONPATH to have our resources somewhere on a sys.path entry.
for one_path in sys.path:
possible = os.path.join(one_path, relpath)
if os.path.exists(possible):
return possible
raise IOError(
errno.ENOENT,
"Could not find data {}".format(relpath),
relpath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path",
"def get_data_file(f):\n if os.path.isfile(f):\n path = f\n\n else:\n p = pkg_resources.resource_filename('PaSDqc', \"db/{}\".format(f))\n \n if os.path.isfile(p):\n path = p\n else:\n raise IOError(\"{} is neither a system file nor a site-package file. Are you sure you have the right file name?\".format(f))\n\n return path",
"def get_data_in_paths(dfile, paths):\n for pth in paths:\n for f in os.listdir(pth):\n if f == dfile:\n return os.path.abspath(os.path.join(pth, dfile))",
"def FindDataFile(filename):\n filename = os.path.expanduser(filename)\n if os.path.exists(filename):\n return filename\n\n # If it's not a relative path, we can't do anything useful.\n if os.path.isabs(filename):\n return filename\n\n other_places = [os.getcwd(),\n os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'Contents', 'Resources'),\n os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources'),\n os.path.join(os.getcwd(), '..'),\n os.path.join(sys.prefix, 'namebench'),\n '/usr/local/share/namebench'\n '/usr/local/etc/namebench',\n '/usr/local/namebench',\n '/etc/namebench',\n '/usr/share/namebench',\n '/usr/namebench']\n for directory in reversed(sys.path):\n other_places.append(directory)\n other_places.append(os.path.join(directory, 'namebench'))\n\n for place in other_places:\n path = os.path.join(place, filename)\n if os.path.exists(path):\n return path\n\n print 'I could not find \"%s\". Tried:' % filename\n for path in other_places:\n print ' %s' % path\n return filename",
"def get_data_filename(relative_path): #TODO put in utils\n\n import os\n from pkg_resources import resource_filename\n fn = resource_filename('mdfptools', os.path.join('data', relative_path))\n\n if not os.path.exists(fn):\n raise ValueError(\"Sorry! %s does not exist. If you just added it, you'll have to re-install\" % fn)\n\n return fn",
"def get_data(path=None):\n import os\n location = os.path.dirname(__file__).replace('/fun', '/ax')\n if path is None:\n print(\"Choose one: \")\n print(\"\\n\".join(os.listdir(os.path.abspath(location))))\n else:\n return os.path.join(os.path.abspath(location), path)",
"def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep",
"def get_data(path):\n root = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(root, 'data', path)",
"def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(__file__), __malstor_data_directory__)\n\n abs_data_path = os.path.abspath(path)\n if not os.path.exists(abs_data_path):\n raise project_path_not_found\n\n return abs_data_path",
"def dataPath(relative):\n return os.path.join(_dataDir, relative)",
"def get_data(path):\n return os.path.join(_ROOT, 'data', path)",
"def get_abspath(relpath, name, version=None):\n\n abspath = join(get_data_dir(), relpath)\n\n if not os.path.exists(abspath):\n url = get_url(name, version)\n\n # If it's a tar file, download and unpack a directory.\n if url.endswith(\".tar.gz\") or url.endswith(\".tar\"):\n dirname = os.path.dirname(abspath)\n download_dir(url, dirname)\n\n # ensure that tarfile unpacked into the expected directory\n if not os.path.exists(abspath):\n raise RuntimeError(\"Tarfile not unpacked into expected \"\n \"subdirectory. Please file an issue.\")\n\n # Otherwise, its a single file.\n else:\n download_file(url, abspath)\n\n return abspath",
"def get_data_path():\n return os.getcwd() + \"/data/\"",
"def locate(path):\n if (test_is_on_hadoop()):\n # Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. However,\n # in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link.\n # Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins\n # machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST\n # be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/ are\n # smalldata and bigdata.\n p = os.path.realpath(os.path.join(\"/home/0xdiag/\",path))\n if not os.path.exists(p): raise ValueError(\"File not found: \" + path)\n return p\n else:\n tmp_dir = os.path.realpath(os.getcwd())\n possible_result = os.path.join(tmp_dir, path)\n while (True):\n if (os.path.exists(possible_result)):\n return possible_result\n\n next_tmp_dir = os.path.dirname(tmp_dir)\n if (next_tmp_dir == tmp_dir):\n raise ValueError(\"File not found: \" + path)\n\n tmp_dir = next_tmp_dir\n possible_result = os.path.join(tmp_dir, path)",
"def locate_data():\n # Locate by using the environment variable\n if \"TESSDATA_PREFIX\" in os.environ:\n data_prefix = os.environ[\"TESSDATA_PREFIX\"]\n\n if os.path.isdir(data_prefix):\n return data_prefix\n\n # Locate by using the command directory\n cmd_path = os.path.dirname(_config.command)\n\n if cmd_path:\n cmd_data_path = os.path.join(cmd_path, \"tessdata\")\n\n if os.path.isdir(cmd_data_path):\n return cmd_data_path\n\n return None",
"def get_data_filename(relative_path):\n\n fn = resource_filename('yank', relative_path)\n\n if not os.path.exists(fn):\n raise ValueError(\"Sorry! %s does not exist. If you just added it, you'll have to re-install\" % fn)\n\n return fn",
"def get_fspath ( self, relpath=None ):\n if relpath:\n return self.root + os.sep + str ( relpath )\n else:\n return self.root",
"def get_data_file():\n this_directory = os.path.dirname(__file__)\n parent_directory = os.path.dirname(this_directory)\n return os.path.join(parent_directory, '_data/fortunes.txt')",
"def get_path(d, f):\n path = os.path.join(d, f)\n check_file(path)\n return path",
"def find_asset(path, root=None):\n if root is None:\n root = 'z:\\\\Leif\\\\Dropbox\\\\mugen\\\\testing-build\\\\'\n\n check = ('', 'data', 'stages', 'sound')\n for folder in (join(root, i) for i in check):\n candidate = join(folder, path)\n if exists(candidate):\n return candidate\n\n return \"<NO PATH TO FILE>\"",
"def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)",
"def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'",
"def file_path(name):\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_dir, 'data', name.lower())",
"def test_repo_relpath(self):\n from os import path\n repodir = \"~/codes/ci/tests\"\n relpath = \"../pyci/config.py\"\n result = path.expanduser(\"~/codes/ci/pyci/config.py\")\n self.assertEqual(result, get_repo_relpath(repodir, relpath))",
"def rospath(fname,checkfs=True):\n\tif checkfs: assert os.path.exists(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tfname = fname.rstrip(\"/\")\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\tr = fname[:fname.find(mark)+len(mark)-4]\t\n\treturn r",
"def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname",
"def get_pdata_path(base_name, recurs):\n base_name = base_name.replace(os.sep, '_')\n return join(PYLINT_HOME, \"%s%s%s\"%(base_name, recurs, '.stats'))",
"def get_file_path(filename, path='Data/'):\n path= os.path.abspath(os.path.dirname(path))\n return os.path.join(path, filename)",
"def get_file_path(filename):\n here_dir = os.path.dirname(os.path.abspath(__file__))\n file_dir = os.path.join(here_dir, \"../data/\", filename)\n\n return file_dir",
"def relpath(filename):\n return os.path.join(os.path.dirname(__file__), filename)"
] | [
"0.71274626",
"0.6904814",
"0.6769969",
"0.6497312",
"0.6490332",
"0.6470966",
"0.64500815",
"0.64413446",
"0.64217675",
"0.6399083",
"0.63932604",
"0.63386345",
"0.6311808",
"0.62965494",
"0.62960714",
"0.6275206",
"0.62670004",
"0.62568855",
"0.62269145",
"0.62260854",
"0.6224592",
"0.620259",
"0.6202502",
"0.61974025",
"0.6192416",
"0.61922914",
"0.6184928",
"0.6175352",
"0.6172458",
"0.61521024"
] | 0.7789457 | 0 |
Build a user_input_listener coroutine. | def user_input_listener(state: SharedState): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _listen(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is: \")\n for i in users:\n print(users[i][\"name\"])\n name = False\n while not name: #Loop until valid user given\n name = input(\"Please enter the user that you would like to start listening to events for: \")\n userID = self._get_user_id(name)\n if not userID:\n name = False\n #Output\n command = \"listen {0}\".format(userID)\n return(command)",
"def listen(self):\n while self.active:\n self.handle_input()",
"def handle_user_input(loop, client):\n login_data = {'USERNAME': ''}\n default_message = {'MESSAGES': []}\n file_upload = {'FILE_UPLOAD': ()}\n file_download = {'FILE_DOWNLOAD': ''}\n ip_address = {'IP': ()}\n ip = socket.gethostbyname(socket.gethostname())\n\n ip_address['IP'] = (ip, 'CHECK')\n data_json = json.dumps(ip_address)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack(\"!I\", len(byte_json))\n client.send_message(byte_count)\n client.send_message(byte_json)\n yield from asyncio.sleep(1)\n\n while not client.login_status:\n message = yield from loop.run_in_executor(None, input, \"> Enter your username: \")\n if message == \"quit\" or message == 'exit':\n loop.stop()\n return\n\n login_data[\"USERNAME\"] = message\n data_json = json.dumps(login_data)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack(\"!I\", len(byte_json))\n\n client.send_message(byte_count)\n client.send_message(byte_json)\n\n yield from asyncio.sleep(1)\n\n login_data['USERNAME'] = ''\n\n while client.login_status:\n message = yield from loop.run_in_executor(None, input, \"{} >>> \".format(client.username))\n\n if message == \"quit\" or message == 'exit':\n loop.stop()\n return\n if message:\n if message[0] == '/':\n if message.split(' ', maxsplit=1)[0][1:] == 'help':\n list_commands()\n\n elif message.split(' ', maxsplit=1)[0][1:] == 'w':\n username = message.split(' ', maxsplit=2)[1]\n private_message = message.split(' ', maxsplit=2)[2]\n complete_message = (client.username, username, calendar.timegm(time.gmtime()),\n private_message)\n default_message['MESSAGES'].append(complete_message)\n data_json = json.dumps(default_message)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack('!I', len(byte_json))\n\n client.send_message(byte_count)\n client.send_message(byte_json)\n\n elif message.split(' ', maxsplit=1)[0][1:] == 'file':\n filename = message.split(' ', maxsplit=1)[1]\n try:\n open_file = open(filename, 'r')\n data = open_file.read()\n file_upload['FILE_UPLOAD'] = (filename, data)\n data_json = json.dumps(file_upload)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack('!I', len(byte_json))\n client.send_message(byte_count)\n client.send_message(byte_json)\n except exec as e:\n print('-----------------------')\n print('File Upload Error: {}'.format(e))\n print('-----------------------')\n\n elif message.split(' ', maxsplit=1)[0][1:] == 'file_download':\n filename = message.split(' ', maxsplit=1)[1]\n file_download['FILE_DOWNLOAD'] = filename\n data_json = json.dumps(file_download)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack('!I', len(byte_json))\n client.send_message(byte_count)\n client.send_message(byte_json)\n\n elif message.split(' ', maxsplit=1)[0][1:] == 'save':\n ip_address['IP'] = ('SAVE', ip)\n data_json = json.dumps(ip_address)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack('!I', len(byte_json))\n client.send_message(byte_count)\n client.send_message(byte_json)\n\n else:\n if message.split(' ', maxsplit=1)[0][1:] == 'feed':\n client.feed = False\n complete_message = (client.username, 'ALL', calendar.timegm(time.gmtime()), message)\n default_message['MESSAGES'].append(complete_message)\n data_json = json.dumps(default_message)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack('!I', len(byte_json))\n client.send_message(byte_count)\n client.send_message(byte_json)\n yield from asyncio.sleep(1)\n\n else:\n complete_message = (client.username, 'ALL', 
calendar.timegm(time.gmtime()), message)\n default_message['MESSAGES'].append(complete_message)\n data_json = json.dumps(default_message)\n byte_json = data_json.encode('ascii')\n byte_count = struct.pack('!I', len(byte_json))\n client.send_message(byte_count)\n client.send_message(byte_json)\n yield from asyncio.sleep(1)\n\n default_message['MESSAGES'] = []\n file_upload['FILE_UPLOAD'] = ()\n file_download['FILE_DOWNLOAD'] = ''\n ip_address[\"IP\"] = ()",
"def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()",
"def new_user_input(self):\n ready, _, _ = select([stdin], [], [], 0.0)\n return stdin in ready",
"def build_listener(self):\n # background = LiveSpeech(**sc.background_config)\n\n \"\"\"Creating an object for an activation word\"\"\"\n activation = LiveSpeech(activation_config={\n 'lm': False,\n 'keyphrase': 'eva',\n 'kws_threshold': self.settings.twsVol,\n })\n\n status = threading.Event()\n\n signal.signal(signal.SIGUSR1, self.handler)\n\n pid = os.getpid()\n\n activation_thread = threading.Thread(name='wait_activ_phrase', target=self.processing_activation_phrase,\n args=(activation, status, pid))\n\n activation_thread.start()",
"async def run(self):\n self.add_msg(\"Type your nickname\")\n # Start the new thread that will listen to responses, while the main thread is sending answers\n start_new_thread(self.listenToRespone, ())",
"def user_input(self):\n\n # Above, we set the timeout of getch() on entryscreen to 500ms. That means\n # that the invalid character (-1) is returned every 500 ms if the user\n # enters nothing, and our validator is called. We take this opportunity to\n # relese the curses lock so any other threads (e.g. the message handling\n # thread) have a chance to update the screen. Additionally, we call\n # update() so that any other changes are picked up. We raise _StoppedError\n # to get out of the surrounding loop in edit() so that we can exit this\n # function cleanly and without hijacking any other exceptions (such as\n # KeyboardInterrupt).\n\n class _StoppedError(Exception):\n pass\n\n def validator(ch):\n if ch == curses.KEY_RESIZE:\n self.chatscreen.clear()\n (y, x) = self.global_screen.getmaxyx()\n curses.resizeterm(y, x)\n self.chatscreen.resize(y-Chat.CHATBOX_SIZE, x)\n self.entryscreen.mvwin(y-Chat.CHATBOX_SIZE, 0)\n self.update()\n return None\n try:\n self.curses_lock.release()\n if not self.running:\n raise _StoppedError\n self.update() # has anything changed?\n if ch < 0:\n return None\n return ch\n finally:\n self.curses_lock.acquire()\n\n try:\n self.curses_lock.acquire()\n cmd = self.textpad.edit(validator)\n self.entryscreen.clear()\n except _StoppedError:\n return ''\n finally:\n self.curses_lock.release()\n\n # strip the newlines out of the middle of the words\n cmd = string.replace(cmd, '\\n', '')\n\n # remove unprintable characters\n cmd = (''.join(c if c in string.printable else '' for c in cmd)).strip()\n\n # process commands if necessary\n if cmd.startswith('/'):\n words = cmd.split()\n cmdname = words[0][1:]\n args = words[1:]\n\n if cmdname in self.commands:\n try:\n self.commands[cmdname](*args)\n except CommandError as e:\n self.message('System:', 'Problem executing command: ' + str(e))\n except TypeError as e:\n self.message('System:', str(e))\n else:\n self.message('System:', 'Unknown command: '+cmdname)\n else:\n # it's not a cmd so it must be a message to send\n self.q.put(cmd)\n self.update()",
"def build_user_input(self):\n pass",
"def UserInput(self, username, userinput):\n pass",
"def start(self):\n self.has_event = False\n self.running = True\n self._condition.acquire()\n self._thread = threading.Thread(target=read_input, args=(self,))\n self._thread.start()",
"def wait_for_input(self):\n pass",
"def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()",
"def on_user_input(self, dut_address, reply_boolean, expected_ui_event):\n pass",
"def input_thread(L):\n raw_input()\n L.append(None)",
"async def async_step_user(\n self, user_input: dict[str, str] | None = None\n ) -> FlowResult:\n if user_input is not None:\n self._async_abort_entries_match(\n {CONF_SERIAL_PORT: user_input[CONF_SERIAL_PORT]}\n )\n\n return self.async_create_entry(\n title=DEFAULT_TITLE,\n data=user_input,\n )\n\n data_schema = self.add_suggested_values_to_schema(DATA_SCHEMA, user_input)\n return self.async_show_form(step_id=\"user\", data_schema=data_schema)",
"async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if user_input is not None:\n self._async_abort_entries_match(\n {\n CONF_LANG: user_input[CONF_LANG],\n CONF_TLD: user_input[CONF_TLD],\n }\n )\n return self.async_create_entry(\n title=\"Google Translate text-to-speech\", data=user_input\n )\n\n return self.async_show_form(step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA)",
"def handle_input():\n\n # wait for user input and get timeout or character to process\n char = read_input()\n\n # handle user input\n if not is_input_valid(char):\n # No valid input, keep waiting for input\n return True\n\n # if terminal size is not valid, stop here\n if not nuqql.config.WinConfig.is_terminal_valid():\n show_terminal_warning()\n return True\n\n # if terminal resized, resize and redraw active windows\n if char == curses.KEY_RESIZE:\n nuqql.conversation.resize_main_window()\n return True\n\n # pass user input to active conversation\n for conv in nuqql.conversation.CONVERSATIONS:\n if conv.is_active():\n conv.process_input(char)\n return True\n\n # if no conversation is active pass input to active list window\n if nuqql.win.MAIN_WINS[\"list\"].state.active:\n # list window navigation\n nuqql.win.MAIN_WINS[\"input\"].redraw()\n nuqql.win.MAIN_WINS[\"log\"].redraw()\n nuqql.win.MAIN_WINS[\"list\"].process_input(char)\n return True\n\n # list window is also inactive -> user quit\n return False",
"async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if self._async_current_entries():\n return self.async_abort(reason=\"single_instance_allowed\")\n\n if user_input is None:\n return self.async_show_form(\n step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA\n )\n\n errors = {}\n\n try:\n await validate_input(self.hass, user_input)\n except error.APIConnectionError:\n errors[\"base\"] = \"cannot_connect\"\n except error.AuthenticationError:\n errors[\"base\"] = \"invalid_auth\"\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n errors[\"base\"] = \"unknown\"\n else:\n return self.async_create_entry(title=\"OpenAI Conversation\", data=user_input)\n\n return self.async_show_form(\n step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA, errors=errors\n )",
"def handle_input(self, event):\n pass",
"async def async_step_user(self, user_input=None):\n if self._async_current_entries():\n return self.async_abort(reason=\"single_instance_allowed\")\n\n self._errors = {}\n\n data_schema = {\n vol.Required(CONF_HOST, default=self._host): str,\n vol.Required(CONF_PORT, default=self._port): int,\n vol.Required(CONF_CLIENT_ID, default=self._client_id): str,\n vol.Required(CONF_ADD_LEDS, default=self._add_leds): bool,\n }\n\n if user_input is not None:\n self._host = str(user_input[CONF_HOST])\n self._port = user_input[CONF_PORT]\n self._client_id = user_input[CONF_CLIENT_ID]\n self._add_leds = user_input[CONF_ADD_LEDS]\n\n try:\n await asyncio.wait_for(\n self.hass.async_add_executor_job(_try_connect, self._host, self._port, self._client_id),\n timeout=CONN_TIMEOUT,\n )\n\n await self.async_set_unique_id(DOMAIN)\n self._abort_if_unique_id_configured()\n\n return self.async_create_entry(\n title=DOMAIN,\n data={\n CONF_HOST: self._host,\n CONF_PORT: self._port,\n CONF_CLIENT_ID: self._client_id,\n CONF_ADD_LEDS: self._add_leds,\n },\n )\n\n except (asyncio.TimeoutError, CannotConnect):\n result = RESULT_CONN_ERROR\n\n if self._is_import:\n _LOGGER.error(\n \"Error importing from configuration.yaml: %s\",\n RESULT_LOG_MESSAGE.get(result, \"Generic Error\"),\n )\n return self.async_abort(reason=result)\n\n self._errors[\"base\"] = result\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(data_schema),\n errors=self._errors,\n )",
"async def async_process_input(self, inp: inputs.Input) -> None:\n raise NotImplementedError()",
"async def async_step_user(self, user_input=None):\n if not user_input:\n return await self._show_form()\n\n identifier = \"{0}, {1}\".format(\n user_input[CONF_LATITUDE], user_input[CONF_LONGITUDE]\n )\n if identifier in configured_instances(self.hass):\n return await self._show_form({\"base\": \"identifier_exists\"})\n\n if self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:\n user_input[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_IMPERIAL\n else:\n user_input[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_METRIC\n\n # When importing from `configuration.yaml`, we give the user\n # flexibility by allowing the `window` parameter to be any type\n # of time period. This will always return a timedelta; unfortunately,\n # timedeltas aren't JSON-serializable, so we can't store them in a\n # config entry as-is; instead, we save the total seconds as an int:\n if CONF_WINDOW in user_input:\n user_input[CONF_WINDOW] = user_input[CONF_WINDOW].total_seconds()\n else:\n user_input[CONF_WINDOW] = DEFAULT_WINDOW.total_seconds()\n\n return self.async_create_entry(title=identifier, data=user_input)",
"async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n errors = {}\n if user_input is not None:\n self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})\n\n if (\n error := await self.hass.async_add_executor_job(\n self._try_connect, user_input\n )\n ) is None:\n return self.async_create_entry(\n title=DEFAULT_NAME,\n data=user_input,\n )\n errors[\"base\"] = error\n\n user_input = user_input or {}\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(\n CONF_HOST, default=user_input.get(CONF_HOST, self.ip_address)\n ): str,\n vol.Optional(\n CONF_USERNAME,\n default=user_input.get(CONF_USERNAME, DEFAULT_USERNAME),\n ): str,\n vol.Required(CONF_PASSWORD): str,\n vol.Required(CONF_USE_LEGACY_PROTOCOL): bool,\n }\n ),\n errors=errors,\n )",
"async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if is_hassio(self.hass):\n return await self.async_step_on_supervisor()\n\n return await self.async_step_manual()",
"def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Alexa Skills Kit color session sample.\"\n\n handler_input.response_builder.speak(\n speech + \" \" + help_text).ask(help_text)\n return handler_input.response_builder.response",
"def get_user_input(self):\n while not self.suspended:\n input = raw_input()\n input = input.split('|')\n if input[0] in ['exit', 'quit', 'kill']:\n self.broadcast('kill')\n self.suspended = True\n for client in self.clients.values():\n client.socket.close()\n self.s.close() # Have to connect to socket to exit server.\n sock = socket(AF_INET, SOCK_STREAM)\n port = bind_to_random(sock)\n sock.connect((str(self.ip), self.port))\n elif len(input) > 1:\n msg = '|'.join(['#server']+input[1:])\n if input[0][:1] == '@':\n destination = input[0][1:].lower()\n if destination == 'server':\n print msg\n elif destination == 'all':\n self.broadcast(msg)\n else:\n client = self.clients.get(destination, None)\n if client:\n client_send(client.socket, msg)\n else:\n print 'Destination not active'\n else:\n print msg",
"async def __bufferedReader():\n while True:\n # Get char and then append to prevent a race condition caused by the async await\n charIn = await __terminalState.osSupport.getInputChar()\n\n wasHandled = False\n for key, handlers in __terminalState.inputHandlers.items():\n if key is None or charIn in key:\n for handler in handlers:\n asyncio.get_event_loop().call_soon(handler, charIn)\n wasHandled = True\n\n if not wasHandled:\n __terminalState.inputBuffer += charIn",
"def read_input(inp):\n epoll = select.epoll()\n epoll.register(sys.stdin.fileno(), select.EPOLLIN)\n while inp.running:\n if is_terminated():\n return\n\n events = epoll.poll(1)\n for fileno, event in events:\n line = \"[\"\n while \"[\" in line:\n line = sys.stdin.readline().strip(\",\").strip()\n inp.has_event = True\n try:\n event = json.loads(line)\n if \"instance\" in event:\n inp.callback(event)\n inp.redraw()\n except ValueError:\n pass\n epoll.unregister(sys.stdin.fileno())\n epoll.close()\n inp.has_event = True\n inp.clean_exit = True",
"def listen(device_input, callback):\n while True:\n time.sleep(0.01)\n event = readControlDataRaw(device_input)\n (control_id, control_type, event_type, value) = parseControlEvent(event)\n if control_id != -1:\n callback(control_id, control_type, event_type, value)"
] | [
"0.60229254",
"0.5902512",
"0.5665694",
"0.5660546",
"0.5540109",
"0.552596",
"0.55182016",
"0.5446837",
"0.54210794",
"0.53281254",
"0.53130955",
"0.5302447",
"0.52767766",
"0.52541715",
"0.5171928",
"0.51666707",
"0.5162627",
"0.51598024",
"0.51451725",
"0.5137769",
"0.503474",
"0.50253594",
"0.50186735",
"0.5017962",
"0.5017549",
"0.50124866",
"0.5011306",
"0.5010744",
"0.5006803",
"0.49527496"
] | 0.74263144 | 0 |
Returns length of longest increasing subsequence given an array of numbers. | def longestIncreasingSubsequence(nums):
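    # dp[i] holds the length of the longest increasing subsequence that ends at index i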
if not nums:
return 0
dp = [None] * len(nums)
dp[0] = 1
maxans = 1
for i in range(1, len(dp)):
maxval = 0
for j in range(0, i):
if nums[i] > nums[j]:
maxval = max(maxval, dp[j])
dp[i] = maxval + 1
maxans = max(maxans, dp[i])
return maxans | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_length_of_longest_sub_array(l):\n if len(l) < 1:\n return 0\n\n longest_seen_sequence = 0\n\n this_sequence_length = 1\n\n previous = l[0]\n\n for _, current in enumerate(l):\n\n if current > previous:\n this_sequence_length = this_sequence_length + 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n else:\n this_sequence_length = 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n previous = current\n\n return longest_seen_sequence",
"def longestCommomSubsequence(self, arrays: List[List[int]]) -> List[int]:\n counts = Counter(val for arr in arrays for val in arr)\n res = []\n for val, count in counts.items():\n if count == len(arrays): res.append(val)\n return res",
"def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])",
"def lengthOfLIS(self, nums):\n n = len(nums)\n if n <= 1:\n return n\n\n max_len = 0\n\n dp = [0] * n\n for i, num in enumerate(nums):\n if i == 0:\n dp[0] = 1\n max_len = 1\n else:\n prev_max = 0\n for j in xrange(i):\n if nums[j] < num:\n prev_max = max(prev_max, dp[j])\n dp[i] = prev_max + 1\n max_len = max(max_len, dp[i])\n\n return max_len",
"def longest_increasing_sub_seq(A):\n\n # boundary cases\n\n # The lenght the of the given list\n arr_len = len(A)\n\n if arr_len <= 1:\n return arr_len\n\n # Create an auxiliary array that will hold the \"end elements\"\n # of the intermeditae LIS' that we will be creating\n\n aux_array = [0 for _ in range(arr_len + 1)]\n\n # Initialize aux_array[0] = A[0]\n aux_array[0] = A[0]\n\n # l acts as our pointer, always points to an empty slot\n l = 1\n\n # Now iterate through the array\n for i in range(1, arr_len):\n if A[i] < aux_array[0]:\n # This is the new smallest value\n # Replace aux_array[0] = A[i]\n\n # i.e we are starting over again, creating a new active list of lenght 1\n # Case 1\n aux_array[0] = A[i]\n\n elif A[i] > aux_array[l - 1]:\n # Case 2: A[i] is largets among all active lists\n aux_array[l] = A[i]\n l += 1\n\n else:\n # Case 3\n # A[i] is in between\n # A[i] wants to be current end candidate of an existing subsequence\n index = get_ceil_index(-1, l - 1, A[i], aux_array)\n aux_array[index] = A[i]\n\n\n return l",
"def find_max_continous_sequence(array, start):\n pos = start\n while pos + 1 < len(array):\n if not array[pos] + 1 == array[pos + 1]:\n break\n pos += 1\n if pos + 1 == len(array):\n return array[start:]\n return array[start:pos + 1]",
"def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))",
"def lengthOfLIS(self, nums: List[int]) -> int:\n# time complexity: O(n^2), space complexity: O(n)\n# this is inspired by the solution provided by the question.\n# dp\n# the idea is to use a list longest to record say i-th element in nums, if as the last of the longest possible subsquence, how long the subsquence would be.\n \n\n# time complexity: O(nlogn), space complexity: O(n)\n# dp with binary search\n# the key idea is to use a list to store the longest possible sequence, but the element in the list is not necessarily correct. Every element say record_long[i] in the list means the end of longest subsequence of length i+1\n# this is inspired by @bolinq in the discussion area.\n import bisect\n record_long = []\n for num in nums:\n index = bisect.bisect_left(record_long, num)\n if index == len(record_long):\n record_long.append(num)\n else:\n record_long[index] = num\n \n return len(record_long)",
"def lengthOfLIS(self, nums: List[int]) -> int:\n n = len(nums)\n F = [0] * n\n \n F[0] = 1\n for i in range(1, n):\n sub_lengths = [0]\n for j in range(0, i):\n if nums[j] < nums[i]:\n sub_lengths.append(F[j])\n F[i] = max(sub_lengths) + 1\n \n return max(F)",
"def lenLongestFibSubseq(self, arr: list[int]) -> int:\n dp = {}\n memo = set(arr)\n N = len(arr)\n for j in range(N):\n for i in range(j):\n a, b = arr[i], arr[j]\n if b - a < a and b - a in memo:\n dp[(a, b)] = dp.get((b - a, a), 2) + 1\n\n return max(dp.values() or [0])",
"def longestPalindromeSubseq(self, s: str) -> int:\n n = len(s)\n dp = [[1] * n for _ in range(n)]\n for length in range(1, n + 1):\n for i in range(n - length + 1):\n j = i + length - 1\n print(i, j)\n if length == 1:\n dp[i][j] = 1\n elif s[i] == s[j]:\n dp[i][j] = dp[i + 1][j - 1] + 2\n else:\n dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])\n return dp[0][n - 1]",
"def longest_seq(n):\n max_seq = 0\n for i in range(SEQ_LENGTH):\n max_seq = max(max_seq, longest_seq_of_1s(n, i))\n\n return max_seq",
"def longest_run(L):\r\n # save the current longest length for increasing run\r\n length_inc = []\r\n # save the current longest length for decreasing run\r\n length_dec = []\r\n # set the initial length to 1\r\n length_inc.append(1)\r\n length_dec.append(1)\r\n # save the result\r\n result_sum = 0\r\n # save the longest length\r\n longest_length = 0\r\n\r\n for i in range(1, len(L)):\r\n # assume the current longest length to 1\r\n length_inc.append(1)\r\n length_dec.append(1)\r\n # for increasing\r\n if L[i] >= L[i - 1]:\r\n length_inc[i] = length_inc[i - 1] + 1\r\n if length_inc[i] > longest_length:\r\n # update result\r\n longest_length = length_inc[i]\r\n result_sum = sum(L[i - longest_length + 1: i + 1])\r\n # for decreasing\r\n if L[i] <= L[i - 1]:\r\n length_dec[i] = length_dec[i - 1] + 1\r\n if length_dec[i] > longest_length:\r\n # update result\r\n longest_length = length_dec[i]\r\n result_sum = sum(L[i - longest_length + 1: i + 1])\r\n return result_sum",
"def lengthOfLongestSubstring(s):\n arr = [1] * len(s)\n i = 0\n j = 1\n while j < len(s):\n if s[j] not in s[i:j]:\n arr[i] += 1\n j = j + 1\n else:\n i = i + 1\n j = i + 1\n return max(arr)",
"def maxLength(self, arr: List[str]) -> int:\r\n res = 0\r\n for p in powerset(arr):\r\n allChars = \"\".join(w for w in p)\r\n if len(allChars) == len(set(allChars)):\r\n res = max(res, len(allChars))\r\n return res",
"def longincseq(v):\n n=len(v)\n if n==0: return -1\n l = 0\n u = n-1\n max2here=1\n maxsofar=1\n for i in xrange(l+1, u+1):\n if v[i]>v[i-1]: \n max2here+=1\n else:\n max2here=1\n maxsofar = max(maxsofar, max2here)\n return maxsofar",
"def find_lis(seq):\n\n # https://rosettacode.org/wiki/Longest_increasing_subsequence#Python:_O.28nlogn.29_Method_from_Wikipedia.27s_LIS_Article.5B1.5D\n\n l = len(seq)\n previous = [0] * l\n minimum = [0] * (l + 1)\n length = 0\n for i in range(l):\n low = 1\n high = length\n while low <= high:\n mid = (low + high) // 2\n if seq[minimum[mid]] < seq[i]:\n low = mid + 1\n else:\n high = mid - 1\n\n new = low\n previous[i] = minimum[new - 1]\n minimum[new] = i\n\n if new > length:\n length = new\n\n s = []\n k = minimum[length]\n for i in range(length - 1, -1, -1):\n s.append(seq[k])\n k = previous[k]\n return s[::-1]",
"def longest_increasing_subsequence(X):\r\n N = len(X)\r\n P = [0] * N\r\n M = [0] * (N+1) \r\n L = 0\r\n for i in range(N):\r\n lo = 1\r\n hi = L\r\n while lo <= hi:\r\n mid = (lo+hi)//2\r\n if (X[M[mid]] < X[i]):\r\n lo = mid+1\r\n else:\r\n hi = mid-1\r\n \r\n newL = lo\r\n P[i] = M[newL-1] \r\n M[newL] = i\r\n #print(newL)\r\n #print(M[L])\r\n \r\n if (newL > L):\r\n L = newL\r\n S = []\r\n k = M[L]\r\n for i in range(L-1, -1, -1):\r\n S.append(X[k])\r\n k = P[k]\r\n print(S)\r\n print(k+1)\r\n \r\n\r\n print('\\nLength of obtained LIS for 30 days stock prices is :: %d'%(len(S)))\r\n return S[::-1]",
"def get_length(array):\n return len(list(array))",
"def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens",
"def longest_sequence(start=1, end=1000000):\n\n max_length = 0\n max_start_value = 0\n\n # generate sequence for each value\n for i in range(start, end):\n current = generate_sequence(i)\n\n # if the current sequence is the longest, update values\n if len(current) > max_length:\n max_length = len(current)\n max_start_value = i\n\n return max_length, max_start_value",
"def find_longest(input):\r\n for thing in input:\r\n print thing\r\n dist_array = [[0 for x in range(rows)] for x in range(cols)] # rows and cols are static variables in main method\r\n for x in xrange(0, len(input), 1):\r\n for y in xrange(0, len(input[x]), 1):\r\n dist_array[x][y] = calculate_longest(dist_array, input, x, y)\r\n for item in dist_array:\r\n print item\r\n return max(max(dist_array))",
"def length_asc_seq(seq):\n if not seq:\n return 0\n\n result = 0\n asc_seq = [1] * len(seq)\n\n for i in range(len(seq)):\n for j in range(i):\n if seq[j] <= seq[i]:\n asc_seq[i] = max(asc_seq[j] + 1, asc_seq[i])\n\n result = max(result, asc_seq[i])\n\n return result",
"def lengthOfLIS(self, nums):\n def _binsearch(lst, target):\n lo, hi = 0, len(lst)\n while lo < hi:\n mid = (lo+hi) // 2\n \n if lst[mid] < target:\n lo = mid+1\n else:\n hi = mid\n return lo\n\n tails = []\n\n for num in nums:\n if not tails or num > tails[-1]:\n tails.append(num)\n else:\n idx = _binsearch(tails, num)\n tails[idx] = num\n return len(tails)",
"def dynamic_programming(D):\n # Runtime: O(n^2)\n n = len(D)\n if n == 0:\n return 0\n longest = []\n for i in range(0, n):\n max_append = []\n for j in range(0, i):\n if D[i] >= D[j] and len(longest[j]) > len(max_append):\n max_append = longest[j]\n longest.append(max_append + [D[i]])\n\n return max(map(lambda s: len(s), longest))",
"def maxSubArray(self, nums: List[int]) -> int:\n # O(n) solution\n # 我们定义函数 S(i) ,它的功能是计算以 0(包括 0)开始加到 i(包括 i)的值。\n # 那么 S(j) - S(i - 1) 就等于 从 i 开始(包括 i)加到 j(包括 j)的值\n # 我们进一步分析,实际上我们只需要遍历一次计算出所有的 S(i), 其中 i = 0,1,2....,n-1。\n # 然后我们再减去之前的 S(k),其中 k = 0,1,i - 1,中的最小值即可。 因此我们需要 用一个变量来维护这个最小值,还需要一个变量维护最大值。\n max_sum = nums[0]\n min_sum_from_start = curr_sum = 0\n for i in range(len(nums)):\n curr_sum = curr_sum + nums[i]\n if curr_sum - min_sum_from_start > max_sum:\n max_sum = curr_sum-min_sum_from_start\n if curr_sum < min_sum_from_start:\n min_sum_from_start = curr_sum\n return max_sum",
"def determine_max_length(sequences, ids):\n max_len = 0\n for i in ids:\n if len(sequences[i]) > max_len:\n max_len = len(sequences[i])\n\n return max_len",
"def maxSubArray(self, nums) -> int:\n maxsub = -10000000\n i = 0\n solutions = self.create_matrix(nums)\n if len(nums)==1:\n return nums[0]\n while i <= len(nums) -1:\n j=i\n while j <= len(nums)-1:\n sum_ij = solutions[i][j]\n if sum_ij > maxsub:\n maxsub = sum_ij\n j+=1\n i +=1\n return maxsub",
"def find_longest_plateau(seq):\n\n start_longest_so_far = 0\n length_longest_so_far = 0\n i = 0\n\n # INVARIANT\n # The longest plateau in seq[0:i] starts at position\n # start_longest_so_far and has a length of\n # length_longest_so_far\n # VARIANT: len(seq) - i\n #\n while len(seq) - i > length_longest_so_far:\n\n length_current_plateau = length_plateau_at(seq, i)\n\n if length_current_plateau > length_longest_so_far:\n start_longest_so_far = i\n length_longest_so_far = length_current_plateau\n\n i += length_current_plateau\n\n return start_longest_so_far",
"def longest_subsequence(s1: str, s2: str, s3: str) -> int:\n # Find the shortest string\n s = min(s1, s2, s3, key=lambda x: len(x))\n total = 0\n ok = False\n while s1:\n for i1, c1 in enumerate(s1):\n for i2, c2 in enumerate(s2):\n if ok:\n ok = False\n break\n if c1 == c2:\n for i3, c3 in enumerate(s3):\n if c1 == c3:\n total += 1\n s1 = s1[i1 + 1:]\n s2 = s2[i2 + 1:]\n s3 = s3[i3 + 1:]\n ok = True\n break\n\n return total"
] | [
"0.7756621",
"0.7332605",
"0.6925975",
"0.68926334",
"0.6865751",
"0.68350095",
"0.66415817",
"0.6594408",
"0.65523106",
"0.6539818",
"0.65185964",
"0.6484091",
"0.64561516",
"0.64262223",
"0.6355707",
"0.62699634",
"0.6269051",
"0.62369823",
"0.6226302",
"0.6150499",
"0.61169815",
"0.6112665",
"0.60968584",
"0.6085501",
"0.60359913",
"0.6009804",
"0.600803",
"0.5998604",
"0.59950787",
"0.5988388"
] | 0.7476641 | 1 |
Create an Action from this intent, filling missing data from state | def at(self, state):
self.complete_data(state)
self.check_duplicate(state)
action = entities.Action(
action_id=new_id(state),
type=self.get_type_name(),
data=pmap(self.data),
time=state.context.time,
randomness=state.context.randomness,
version=state.context.version,
)
return action | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _formulate_action(Action, **kwargs):\n\n return Action(**kwargs)",
"def action(self, action_id):\r\n return Action(self, action_id)",
"def action(self, action_id):\r\n return Action(self, action_id)",
"def from_of_action(cls, of_action):\n return cls()",
"def create_action(instance, verb, user):\n return instance.activities.create(action=verb, owner=user)",
"def _createAction(self, item, actionString):\n action = {\n \"action\": actionString,\n \"item_id\": item.item_id\n }\n\n pocketLogger.debug(\"Action\" + str(action))\n\n self.actions.append(action)",
"def buildActionSpace(self):\n self.action_types = self.AGENT_TYPES\n self.action_space = Dict({\n \"action\": Discrete(len(self.AGENT_TYPES)), \n })\n self.action_space.shape = (len(self.action_types),)",
"def ToAction(self):\n action = self.action_key.get()\n if not action:\n raise ValueError('Test run action %s not found' % self.action_key)\n options = NameValuePair.ToDict(action.options or [])\n options.update(NameValuePair.ToDict(self.options or []))\n action.options = NameValuePair.FromDict(options)\n return action",
"def __init__(self, action=0):\n self.action = action",
"def __init__(self, action, dateTime, outcome, outcomeDesc,\n purposeOfEvent, subtype, type):\n self.action = action\n self.dateTime = dateTime\n self.outcome = outcome\n self.outcomeDesc = outcomeDesc\n self.purposeOfEvent = purposeOfEvent\n self.subtype = subtype\n self.type = type",
"def from_of_action(cls, of_action):\n return cls(queue_id=of_action.queue_id.value)",
"def action():\n \n action={}\n action[\"type\"]=random.choice(actions)\n action[\"date\"]=actionDate()\n\n if action[\"type\"]==\"file\":\n action[\"to_removable_media\"]=random.choice([True,False])\n action[\"from_removable_media\"]=random.choice([True,False])\n elif action[\"type\"]==\"email\":\n action[\"activity\"]=random.choice([\"Send\",\"View\"])\n return action",
"def __init__(self):\n if not isinstance(getattr(self, 'ACTION_NAME', None), basestring):\n raise AttributeError(_NO_ACTION_NAME_MSG % self.__class__.__name__)\n if not isinstance(getattr(self, 'FRIENDLY_NAME', None), basestring):\n raise AttributeError(_NO_FRIENDLY_NAME_MSG % self.__class__.__name__)\n try:\n if not inspect.ismethod(super(BaseAction, self).__getattribute__('run')):\n raise AttributeError()\n except AttributeError:\n raise AttributeError(_NO_RUN_METHOD_MSG % self.__class__.__name__)\n self.action_type = getattr(self, 'ACTION_TYPE', ActionType.ASYNC)\n if self.action_type not in (ActionType.SYNC, ActionType.ASYNC):\n raise AttributeError(\n _BAD_ACTION_TYPE_MSG %\n (self.__class__.__name__, str(self.action_type)))",
"def __init__(self, action_type=None, length=None):\n super().__init__()\n self.action_type = action_type\n self.length = length",
"def _make_random_action(action_spec, observation):\n # Sample the random action.\n action = {}\n for name, spec in action_spec.items():\n if name == \"Index\":\n value = np.random.randint(observation[\"n_edge\"])\n elif spec.dtype in (np.int32, np.int64, int):\n value = np.random.randint(spec.minimum, spec.maximum + 1)\n else:\n value = np.random.uniform(spec.minimum, spec.maximum)\n action[name] = value\n return action",
"def actions(self, state):\n raise NotImplementedError # Override this!",
"def convert_to_low_level_action(self, i_state, action):\n pass",
"def take_action(self, state):\n action = super(SarsaAgent, self).take_action(state)\n if self.learning:\n self.update_q_values(state, self.q_value((state, action)))\n self.prev_state = state\n self.prev_action = action\n self.prev_q_val = self.q_values[self.represent_state(self.prev_state), self.prev_action]\n self.log(\"size of q_values {0}\\nprev state {1}\\nprev action {2}\\nprev q-val {3}\"\n .format(len(self.q_values), self.prev_state, self.prev_action, self.prev_q_val))\n return action",
"def create_action(self, *args, **kwargs):\n action_group = kwargs.pop('action_group', None)\n act = QAction(*args, **kwargs)\n if action_group:\n act.setActionGroup(action_group)\n\n return act",
"def actions(self, state):\n\t\traise NotImplementedError",
"def __call__(self, state, action):\n pass",
"def take_action(self, state):",
"def _take_action(self, action):\n # Get transition probabilities for all potential next state values\n trans_probs = self.transition_probabilities[self.cur_state, action]\n\n # Generate an array of next state options to choose from\n next_state_options = np.linspace(0, self.n_states-1, self.n_states,\n dtype=int)\n\n # Sample from new state options based on the transition probabilities\n new_state = np.random.choice(next_state_options, p=trans_probs)\n\n return new_state",
"def from_of_action(cls, of_action):\n return cls(port=of_action.port.value)",
"def _fill_action_info(action):\n def _is_ascii(s):\n return all(ord(c) < 128 for c in s)\n\n if not _is_ascii(action.obj_desc_str):\n tf.logging.info('Found an unconvertable unicode %s', action.obj_desc_str)\n return\n\n if not (isinstance(action.verb_str, str) and isinstance(\n action.obj_desc_str, str) and isinstance(action.input_content_str, str)):\n return\n action.regularize_strs()\n input_str_pos_padding = [\n config.LABEL_DEFAULT_VALUE_INT, config.LABEL_DEFAULT_VALUE_INT\n ]\n\n input_prep_word = _get_input_prep_word()\n swipe_prep_word = _get_swipe_prep_word()\n\n if action.action_rule == common.ActionRules.NO_VERB_RULE:\n action.instruction_str = action.obj_desc_str\n action.verb_str_pos = [0, 0]\n action.obj_str_pos = [0, _count_chars(action.obj_desc_str)]\n action.input_str_pos = input_str_pos_padding\n return\n\n if action.action_type in [common.ActionTypes.CLICK]:\n action.instruction_str = '%s %s' % (action.verb_str, action.obj_desc_str)\n action.verb_str_pos = [0, _count_chars(action.verb_str)]\n action.obj_str_pos = [\n _count_chars(action.verb_str) + 1,\n _count_chars(action.instruction_str)\n ]\n action.input_str_pos = input_str_pos_padding\n\n elif action.action_type in [common.ActionTypes.INPUT]:\n # There is no space between 4th and 5th string because the 2nd string,\n # article word, is optional.\n action.instruction_str = '%s %s %s %s' % (\n action.verb_str, action.input_content_str, input_prep_word,\n action.obj_desc_str)\n action.verb_str_pos = [0, _count_chars(action.verb_str)]\n action.input_str_pos = [\n _count_chars(action.verb_str) + 1,\n _count_chars('%s %s' % (action.verb_str, action.input_content_str))\n ]\n action.obj_str_pos = [\n _count_chars(\n '%s %s %s' %\n (action.verb_str, action.input_content_str, input_prep_word)) + 1,\n _count_chars(action.instruction_str)\n ]\n # All the rests are swipe actions\n else:\n action.instruction_str = '%s %s %s' % (action.verb_str, swipe_prep_word,\n action.obj_desc_str)\n action.verb_str_pos = [0, _count_chars(action.verb_str)]\n action.input_str_pos = input_str_pos_padding\n action.obj_str_pos = [\n _count_chars('%s %s' % (action.verb_str, swipe_prep_word)) + 1,\n _count_chars(action.instruction_str)\n ]",
"def new(cls, gtk_action, parent):\n # This code is similar to code in the loader, investigate\n # if we can use more code reusage\n name = gtk_action.get_name()\n label = gtk_action.get_property('label')\n short_label = gtk_action.get_property('short-label')\n is_important = gtk_action.get_property('is-important')\n tooltip = gtk_action.get_property('tooltip')\n stock_id = gtk_action.get_property('stock-id') or None\n gaction = cls(parent, name, label, short_label, is_important,\n tooltip, stock_id)\n\n # check if it has accelerator\n accel_entry = gtk.accel_map_lookup_entry('<Actions>/%s/%s' %\n (parent.name, name))\n if accel_entry:\n key, modifier = accel_entry\n if key != 0:\n gaction.accelerator = gtk.accelerator_name(key, modifier)\n\n # check if it has signal handler\n callback = gtk_action.get_data('handler')\n if callback:\n gaction.callback = callback\n\n return gaction",
"def create_action(self, action: Action, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/action/v1beta2/actions\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = action.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Action)",
"def __init__(self,actionID,actiontype,policy,**kwargs):\n\t\tself._actionID \t = actionID\n\t\tself._actiontype = actiontype\n\t\tself._policy \t = policy\n\t\tself._params = kwargs",
"def create(action_data, page):\n return Action.objects.create(**{\n \"action\": action_data,\n \"page\": page\n })",
"def action(self, gstate, actions=None):\n raise NotImplementedError"
] | [
"0.63214076",
"0.6280206",
"0.6280206",
"0.6092075",
"0.60566056",
"0.5983633",
"0.5972293",
"0.59379184",
"0.58593243",
"0.58537024",
"0.5842147",
"0.5834748",
"0.5764666",
"0.5716841",
"0.5701877",
"0.5692661",
"0.5685065",
"0.56532115",
"0.5642375",
"0.56263447",
"0.56133634",
"0.5593593",
"0.5580838",
"0.55745745",
"0.55703413",
"0.556401",
"0.5559264",
"0.5555547",
"0.5548462",
"0.5531085"
] | 0.72059274 | 0 |
Match the calibSources and sources, and propagate Interesting Flags (e.g. PSF star) to the sources | def propagateCalibFlags(keysToCopy, calibSources, sources, matchRadius=1):
if calibSources is None or sources is None:
return
closest = False # return all matched objects
matched = afwTable.matchRaDec(calibSources, sources, matchRadius*afwGeom.arcseconds, closest)
#
# Because we had to allow multiple matches to handle parents, we now need to
# prune to the best matches
#
bestMatches = {}
for m0, m1, d in matched:
id0 = m0.getId()
        if id0 in bestMatches:
if d > bestMatches[id0][2]:
continue
bestMatches[id0] = (m0, m1, d)
matched = bestMatches.values()
#
# Check that we got it right
#
if len(set(m[0].getId() for m in matched)) != len(matched):
print("At least one calibSource is matched to more than one Source")
#
# Copy over the desired flags
#
for cs, s, d in matched:
skey, ckey = keysToCopy[0]
s.setFlag(skey, True)
for skey, ckey in keysToCopy[1:]:
s.set(skey, cs.get(ckey)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sources_extraction(image,sextractor_pars):\n\n cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name= sextractor_pars\n sp.run('sex %s.fits -c gft.sex -CATALOG_NAME %s.cat -CATALOG_TYPE ASCII_HEAD -PARAMETERS_NAME gft.param -DETECT_TYPE CCD -DETECT_MINAREA %d -DETECT_THRESH %d -ANALYSIS_THRESH %d -PHOT_APERTURES %d -SATUR_LEVEL %d -MAG_ZEROPOINT %f -GAIN %f -PIXEL_SCALE %f -SEEING_FWHM %f -BACK_TYPE %s -BACK_VALUE %f -BACK_SIZE %d -BACKPHOTO_TYPE %s -BACKPHOTO_THICK %d -BACK_FILTTHRESH %f -CHECKIMAGE_TYPE %s -CHECKIMAGE_NAME %s.fits ' % (image,cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name),shell=True)",
"def _filterTargetLynxIS(self, **kwargs):\n\n # check excludedImport exist (ensures functions are run in the right order)\n if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):\n raise AttributeError('sampleMetadataExcluded, featureMetadataExcluded, intensityDataExcluded, expectedConcentrationExcluded or excludedFlag have not bee previously initialised')\n # check calibration dictionary exist (ensures functions are run in the right order)\n if not hasattr(self, 'calibration'):\n raise AttributeError('calibration dictionary has not been previously initialised')\n\n sampleMetadata = copy.deepcopy(self.sampleMetadata)\n featureMetadata = copy.deepcopy(self.featureMetadata)\n intensityData = copy.deepcopy(self._intensityData)\n expectedConcentration = copy.deepcopy(self.expectedConcentration)\n excludedImportSampleMetadata = copy.deepcopy(self.sampleMetadataExcluded)\n excludedImportFeatureMetadata = copy.deepcopy(self.featureMetadataExcluded)\n excludedImportIntensityData = copy.deepcopy(self.intensityDataExcluded)\n excludedImportExpectedConcentration = copy.deepcopy(self.expectedConcentrationExcluded)\n excludedImportFlag = copy.deepcopy(self.excludedFlag)\n calibration = copy.deepcopy(self.calibration)\n peakInfo = copy.deepcopy(self.peakInfo)\n\n # Feature to keep\n keptFeat = ~featureMetadata['IS'].values.astype(bool)\n # Filter\n tmpFeatureMetadata = featureMetadata.loc[keptFeat, :]\n tmpIntensityData = intensityData[:, keptFeat]\n tmpExpectedConcentration = expectedConcentration.loc[:, keptFeat]\n tmpCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[keptFeat, :]\n tmpCalibIntensityData = calibration['calibIntensityData'][:, keptFeat]\n tmpCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, keptFeat]\n tmpCalibPeakResponse = calibration['calibPeakInfo']['peakResponse'].loc[:, keptFeat]\n tmpCalibPeakArea = calibration['calibPeakInfo']['peakArea'].loc[:, keptFeat]\n tmpCalibPeakConcentrationDeviation = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, keptFeat]\n tmpCalibPeakIntegrationFlag = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, keptFeat]\n tmpCalibPeakRT = calibration['calibPeakInfo']['peakRT'].loc[:, keptFeat]\n tmpCalibPeakInfo = {'peakResponse': tmpCalibPeakResponse, 'peakArea': tmpCalibPeakArea, 'peakConcentrationDeviation': tmpCalibPeakConcentrationDeviation, 'peakIntegrationFlag': tmpCalibPeakIntegrationFlag, 'peakRT': tmpCalibPeakRT}\n tmpCalibration = {'calibSampleMetadata': calibration['calibSampleMetadata'], 'calibFeatureMetadata': tmpCalibFeatureMetadata, 'calibIntensityData': tmpCalibIntensityData, 'calibExpectedConcentration': tmpCalibExpectedConcentration, 'calibPeakInfo': tmpCalibPeakInfo}\n tmpPeakResponse = peakInfo['peakResponse'].loc[:, keptFeat]\n tmpPeakArea = peakInfo['peakArea'].loc[:, keptFeat]\n tmpPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[:, keptFeat]\n tmpPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[:, keptFeat]\n tmpPeakRT = peakInfo['peakRT'].loc[:, keptFeat]\n tmpPeakInfo = {'peakResponse': tmpPeakResponse, 'peakArea': tmpPeakArea, 'peakConcentrationDeviation': tmpPeakConcentrationDeviation, 'peakIntegrationFlag': tmpPeakIntegrationFlag, 'peakRT': tmpPeakRT}\n\n # Features to exclude\n ISFeat = ~keptFeat\n if sum(ISFeat) != 0:\n 
excludedImportSampleMetadata.append(sampleMetadata)\n excludedImportFeatureMetadata.append(featureMetadata.loc[ISFeat, :])\n excludedImportIntensityData.append(intensityData[:, ISFeat])\n excludedImportExpectedConcentration.append(expectedConcentration.loc[:, ISFeat])\n excludedImportFlag.append('Features')\n\n # Clean columns\n tmpFeatureMetadata.reset_index(drop=True, inplace=True)\n tmpCalibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)\n tmpFeatureMetadata = tmpFeatureMetadata.drop(['IS', 'TargetLynx IS ID'], axis=1)\n\n # Output\n self.featureMetadata = tmpFeatureMetadata\n self._intensityData = tmpIntensityData\n self.expectedConcentration = tmpExpectedConcentration\n self.sampleMetadataExcluded = excludedImportSampleMetadata\n self.featureMetadataExcluded = excludedImportFeatureMetadata\n self.intensityDataExcluded = excludedImportIntensityData\n self.expectedConcentrationExcluded = excludedImportExpectedConcentration\n self.excludedFlag = excludedImportFlag\n self.calibration = tmpCalibration\n self.peakInfo = tmpPeakInfo\n\n # log the modifications\n print(sum(keptFeat), 'feature are kept for processing,',sum(ISFeat),'IS removed')\n print('-----')\n self.Attributes['Log'].append([datetime.now(), '%d features kept for processing (%d samples). %d IS features filtered.' % (sum(keptFeat), self.noSamples, sum(ISFeat))])",
"def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')",
"def pattern_matching(pattern_base, cc_pattern_base):\n papers = [os.path.join(target_folder, paper) for paper in os.listdir(target_folder) if \".xml\" in paper]\n \n for paper in papers:\n paper_text = open(paper[:paper.index('.')]+\".txt\", 'r').read()\n \n annotator = detect_change_events(paper, pattern_base, paper_text) \n annotator = detect_cause_correlation(paper_text, cc_pattern_base, annotator)\n \n # Write the annotations to file\n with open(paper[:paper.index('.')]+\".ann\", 'w') as annfile:\n for annotation in annotator.annotations:\n annfile.write(annotation+\"\\n\")",
"def add_source_achors():\n pass",
"def get_source_patch_masks(self):\n self.source_patch_masks = {\n patch_center: self.get_patch_mask(patch_center)\n for patch_center in self.patch_centers\n if not np.bitwise_and(self.get_patch_mask(patch_center), self.unknown_mask).any()\n }\n self.patch_centers = tuple(list(self.source_patch_masks.keys()))",
"def associate(conn, detected_sources, imobj, search_radius, save):\n # Find image resolution class\n for config, res_range in res_dict.items():\n if res_range[0] < imobj.bmin <= res_range[1]:\n res_class = config\n \n # Extract all previously detected sources in the same FOV\n assoc_rows = cone_search(conn, 'assoc_source', imobj.obs_ra,\n imobj.obs_dec, search_radius)\n match_logger.info('Extracted {} sources from assoc_source table '\n 'within {} degrees.'.format(\n len(assoc_rows), search_radius))\n # Limit to sources taken from images of similar resolution\n if len(assoc_rows) > 0:\n filtered_assoc_rows = filter_res(assoc_rows, res_class)\n else:\n filtered_assoc_rows = []\n\n if not filtered_assoc_rows:\n # No previous sources found in that sky region at that resolution\n for src in detected_sources:\n src.res_class = res_class\n src.ndetect = 1\n detected_matched = []\n detected_unmatched = detected_sources\n assoc_matched = []\n assoc_unmatched = []\n else:\n # Translate row dictionaries to DetectedSource objects\n assoc_sources = []\n assoc_ids = []\n for asrc in filtered_assoc_rows:\n assoc_ids.append(asrc['id'])\n assoc_sources.append(dbclasses.DetectedSource())\n dbclasses.dict2attr(assoc_sources[-1], asrc)\n match_logger.info('Attempting to match {} sources from this image to '\n '{} sources previously detected in VLITE images...'.\n format(len(detected_sources), len(assoc_sources)))\n\n detected_matched = []\n detected_unmatched = []\n assoc_matched = []\n assoc_unmatched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n # Print results without saving to database\n if not save:\n # Dump detected_sources into temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in detected_sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b WHERE b.id IN %s\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1)\n AS bb'''\n values = (0.5*imobj.bmin, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n match_logger.info('src_id match assoc_id\\tra\\t\\te_ra\\t\\t\\tdec\\t\\t'\n 'e_dec\\t\\tseparation (arcsec)\\tndetect')\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n # Save association results for database\n else:\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM detected_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b\n WHERE a.image_id = %s AND b.id IN %s ORDER BY\n q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, imobj.id, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n\n 
cur.close()\n\n # Create dictionary of src_id keys & associated values\n rowdict = {}\n for row in rows:\n rowdict[row['src_id']] = [row['assoc_id'], row['sep'], row['match']]\n\n for src in detected_sources:\n # Get the associated source object\n asrc = [msrc for msrc in assoc_sources if \\\n msrc.id == rowdict[src.src_id][0]][0]\n if rowdict[src.src_id][2]:\n # It's a match!\n src.assoc_id = asrc.id\n detected_matched.append(src)\n # Compute weighted averages\n cur_sigra_sq = asrc.e_ra * asrc.e_ra\n cur_sigdec_sq = asrc.e_dec * asrc.e_dec\n asrc.e_ra = np.sqrt(1. / (\n (1. / cur_sigra_sq) + (1. / (src.e_ra * src.e_ra))))\n asrc.ra = (asrc.e_ra * asrc.e_ra) * (\n (asrc.ra / cur_sigra_sq) + (src.ra / (\n src.e_ra * src.e_ra)))\n asrc.e_dec = np.sqrt(1. / (\n (1. / cur_sigdec_sq) + (1. / (src.e_dec * src.e_dec))))\n asrc.dec = (asrc.e_dec * asrc.e_dec) * (\n (asrc.dec / cur_sigdec_sq) + (src.dec / (\n src.e_dec * src.e_dec)))\n asrc.ndetect += 1\n assoc_matched.append(asrc)\n else:\n # No match -- new source\n src.res_class = res_class\n src.ndetect = 1\n detected_unmatched.append(src)\n assoc_unmatched.append(asrc)\n if not save:\n match_logger.info('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\n src.src_id, rowdict[src.src_id][2], asrc.id, asrc.ra,\n asrc.e_ra, asrc.dec, asrc.e_dec, rowdict[src.src_id][1],\n asrc.ndetect))\n\n match_logger.info(' -- number of matches: {}'.format(len(detected_matched)))\n match_logger.info(' -- number of new sources to add: {}'.format(\n len(detected_unmatched)))\n\n return detected_matched, detected_unmatched, assoc_matched, assoc_unmatched",
"def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. 
We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)",
"def update_mask_sources_ifgs(mask_sources, sources, mask_ifgs, ifgs):\n import numpy as np\n import numpy.ma as ma\n from icasar.aux import col_to_ma\n\n \n \n def apply_new_mask(ifgs, mask_old, mask_new):\n \"\"\"Apply a new mask to a collection of ifgs (or sources) that are stored as row vectors with an accompanying mask. \n Inputs:\n ifgs | r2 array | ifgs as row vectors\n mask_old | r2 array | mask to convert a row of ifg into a rank 2 masked array\n mask_new | r2 array | the new mask to be applied. Note that it must not unmask any pixels that are already masked. \n Returns:\n ifgs_new_mask | r2 array | as per ifgs, but with a new mask. \n History:\n 2020/06/26 | MEG | Written\n 2022_03_30 | MEG | fix a bug in the number of pixels for the output array (ie. number of columns of ifgs_new_mask)\n \"\"\"\n\n \n for ifg_n, ifg in enumerate(ifgs): # Loop through each source\n ifg_r2 = col_to_ma(ifg, mask_old) # turn it from a row vector into a rank 2 masked array \n ifg_r2_new_mask = ma.array(ifg_r2, mask = mask_new) # apply the new mask \n ifg_r1_new_mask = ma.compressed(ifg_r2_new_mask) # convert to row vector \n if ifg_n == 0: # if it's the first ifg.. \n n_pixs_new = ifg_r1_new_mask.shape[0] # get the new number of pixels \n ifgs_new_mask = np.zeros((ifgs.shape[0], n_pixs_new)) # initiate an array of the correct size\n ifgs_new_mask[ifg_n, :] = ifg_r1_new_mask # put the row vector into the array\n return ifgs_new_mask\n \n \n # check some inputs. Not exhuastive!\n if (len(sources.shape) != 2) or (len(ifgs.shape) != 2):\n raise Exception(f\"Both 'sources' and 'ifgs' must be rank 2 arrays (even if they are only a single source). Exiting. \")\n \n if mask_sources.shape != mask_ifgs.shape:\n raise Exception(f\"The two masks must be the same size, even if they mask different pixels. Exiting. \")\n \n mask_both = ~np.logical_and(~mask_sources, ~mask_ifgs) # make a new mask for pixels that are in the sources AND in the current time series\n n_pixs_sources = len(np.argwhere(mask_sources == False)) # masked pixels are 1s, so invert with 1- bit so that non-masked are 1s, then sum to get number of pixels\n n_pixs_new = len(np.argwhere(mask_ifgs == False)) # ditto for new mask\n n_pixs_both = len(np.argwhere(mask_both == False)) # ditto for the mutual mask\n print(f\"Updating masks and ICA sources. Of the {n_pixs_sources} in the 1st set of sources and {n_pixs_new} in the 2nd set of sources, \"\n f\"{n_pixs_both} are in both and can be used in the following step. \")\n \n ifgs_new_mask = apply_new_mask(ifgs, mask_ifgs, mask_both) # apply the new mask to the old ifgs and return the non-masked elemts as row vectors. \n sources_new_mask = apply_new_mask(sources, mask_sources, mask_both) # ditto for the sources. \n \n return ifgs_new_mask, sources_new_mask, mask_both",
"def _load_sources(self):\n self.point_sources= []\n if os.path.exists(os.path.join(self.folder,'pickle.zip')):\n pzip = zipfile.ZipFile(os.path.join(self.folder,'pickle.zip'))\n files = ['pickle/HP12_%04d.pickle' %i for i in range(1728)]\n assert all(f in pzip.namelist() for f in files), 'Improper model zip file'\n opener = pzip.open\n else:\n files = glob.glob(os.path.join(self.folder, 'pickle', '*.pickle'))\n files.sort()\n opener = open\n self.nside = int(np.sqrt(len(files)/12))\n if len(files) != 12*self.nside**2:\n msg = 'Number of pickled ROI files, %d, found in folder %s, not consistent with HEALpix' \\\n % (len(files),os.path.join(self.folder, 'pickle'))\n raise Exception(msg)\n \n ####self.global_sources = sources.GlobalSourceList() # allocate list to index parameters for global sources\n self.extended_sources=[] # list of unique extended sources\n self.changed=set() # to keep track of extended models that are different from catalog\n moved=0\n nfreed = 0\n self.tagged=set()\n source_names =[]\n for i,file in enumerate(files):\n p = pickle.load(opener(file))\n index = int(os.path.splitext(file)[0][-4:])\n assert i==index, 'logic error: file name %s inconsistent with expected index %d' % (file, i)\n roi_sources = p.get('sources', {}) # don't know why this needed\n extended_names = {} if (self.__dict__.get('extended_catalog') is None) else self.extended_catalog.names\n for key,item in roi_sources.items():\n if key in extended_names: continue\n if key in source_names:\n #if not self.quiet: print ('SkyModel warning: source with name %s in ROI %d duplicates previous entry: ignored'%(key, i))\n continue\n source_names.append(key)\n skydir = item['skydir']\n if self.update_positions is not None:\n ellipse = item.get('ellipse', None)\n ts = item['ts']\n if ellipse is not None and not np.any(np.isnan(ellipse)) :\n fit_ra, fit_dec, a, b, ang, qual, delta_ts = ellipse\n if qual<5 and a < 0.2 and \\\n ts>self.update_positions and delta_ts>0.1:\n skydir = SkyDir(float(fit_ra),float(fit_dec))\n moved +=1\n self.tagged.add(i)\n \n ps = sources.PointSource(name=key,\n skydir=skydir, model= sources.convert_model(item['model']),\n ts=item['ts'],band_ts=item['band_ts'], index=index)\n if sources.validate(ps,self.nside, self.filter):\n self._check_position(ps) # check that it is not coincident with previous source(warning for now?)\n self.point_sources.append( ps)\n # make a list of extended sources used in the model \n names = p.get('diffuse_names')\n for name, oldmodel in zip(names, p['diffuse']):\n model = sources.convert_model(oldmodel) # convert from old Model version if necessary \n key = name.split('_')[0]\n if key in self.diffuse_dict:\n self.diffuse_dict.add_model(index, name, model)\n elif self.extended_catalog_name=='ignore': \n continue\n else:\n try:\n es = self.extended_catalog.lookup(name) if self.extended_catalog is not None else None\n except Exception as msg:\n print ('Skymodel: Failed to create model for %s' %name)\n raise\n if es is None:\n #raise Exception( 'Extended source %s not found in extended catalog' %name)\n print ('SkyModel warning: Extended source %s not found in extended catalog, removing' %name)\n continue\n if self.hpindex(es.skydir)!=index: continue\n \n if es.model.name!=model.name:\n if name not in self.changed:\n if not self.quiet: print ('SkyModel warning: catalog model %s changed from %s for source %s: keeping change'%\\\n (es.model.name, model.name, name))\n self.changed.add(name)\n es.smodel=es.model=model #update with current fit values always\n if 
sources.validate(es,self.nside, self.filter): #lambda x: True): \n self.extended_sources.append(es)\n # check for new extended sources not yet in model\n self._check_for_extended()\n if self.update_positions and moved>0:\n print ('updated positions of %d sources, healpix ids in tagged' % moved)",
"def setup_flags(self):\n self.io_args.color = self.io_args.color_full\n self.io_args.rig_in = self.io_args.rig\n self.io_args.matches = os.path.join(self.io_args.output_root, \"matches.json\")\n self.io_args.rig_out = os.path.join(self.io_args.output_root, \"rig.json\")",
"def go_calib():\n\n ####################\n #\n # Calibration files:\n # everything created under calib/\n #\n ####################\n # Darks - created in subdir darks/\n # - darks needed to make bad pixel mask\n # - store the resulting dark in the file name that indicates the\n # integration time (2.8s) and the coadds (10ca).\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n darkFiles = ['i200822_s003{0:03d}_flip'.format(ii) for ii in range(23, 27+1)]\n calib.makedark(darkFiles, 'dark_39.832s_1ca_6rd.fits', instrument=osiris)\n\n darkFiles = ['i200822_s003{0:03d}_flip'.format(ii) for ii in range(28, 32+1)]\n calib.makedark(darkFiles, 'dark_5.901s_1ca_4rd.fits', instrument=osiris)\n\n darkFiles = ['i200822_s020{0:03d}_flip'.format(ii) for ii in range(2, 10+1)]\n calib.makedark(darkFiles, 'dark_11.802s_4ca_4rd.fits', instrument=osiris)\n\n darkFiles = ['i200822_s021{0:03d}_flip'.format(ii) for ii in range(2, 10+1)]\n calib.makedark(darkFiles, 'dark_5.901s_8ca_1rd.fits', instrument=osiris)\n\n # Flats - created in subdir flats/\n offFiles = ['i200822_s003{0:03d}_flip'.format(ii) for ii in range(3, 21+1, 2)]\n onFiles = ['i200822_s003{0:03d}_flip'.format(ii) for ii in range(4, 22+1, 2)]\n calib.makeflat(onFiles, offFiles, 'flat_kp_tdOpen.fits', instrument=osiris)\n\n # Masks (assumes files were created under calib/darks/ and calib/flats/)\n calib.makemask('dark_39.832s_1ca_6rd.fits', 'flat_kp_tdOpen.fits',\n 'supermask.fits', instrument=osiris)",
"def test_02_source(self):\n for activity in self.manager_stravagpx:\n self.assertEqual(activity.metadata.source_format, 'gpx')\n for activity in self.manager_garmintcx:\n self.assertEqual(activity.metadata.source_format, 'tcx')\n for activity in self.manager_fit:\n self.assertEqual(activity.metadata.source_format, 'fit')",
"def add_catalogs(self):\n n_exposures = len(self.info['Module'])\n self.info['point_source'] = [None] * n_exposures\n self.info['galaxyListFile'] = [None] * n_exposures\n self.info['extended'] = [None] * n_exposures\n self.info['convolveExtended'] = [False] * n_exposures\n self.info['movingTarg'] = [None] * n_exposures\n self.info['movingTargSersic'] = [None] * n_exposures\n self.info['movingTargExtended'] = [None] * n_exposures\n self.info['movingTargToTrack'] = [None] * n_exposures\n\n for i in range(n_exposures):\n if int(self.info['detector'][i][-1]) < 5:\n filtkey = 'ShortFilter'\n pupilkey = 'ShortPupil'\n else:\n filtkey = 'LongFilter'\n pupilkey = 'LongPupil'\n filt = self.info[filtkey][i]\n pup = self.info[pupilkey][i]\n\n if self.point_source[i] is not None:\n # In here, we assume the user provided a catalog to go with each filter\n # so now we need to find the filter for each entry and generate a list that makes sense\n self.info['point_source'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.point_source, 'point source')))\n else:\n self.info['point_source'][i] = None\n if self.galaxyListFile[i] is not None:\n self.info['galaxyListFile'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.galaxyListFile, 'galaxy')))\n else:\n self.info['galaxyListFile'][i] = None\n if self.extended[i] is not None:\n self.info['extended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.extended, 'extended')))\n else:\n self.info['extended'][i] = None\n if self.movingTarg[i] is not None:\n self.info['movingTarg'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTarg, 'moving point source target')))\n else:\n self.info['movingTarg'][i] = None\n if self.movingTargSersic[i] is not None:\n self.info['movingTargSersic'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargSersic, 'moving sersic target')))\n else:\n self.info['movingTargSersic'][i] = None\n if self.movingTargExtended[i] is not None:\n self.info['movingTargExtended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargExtended, 'moving extended target')))\n else:\n self.info['movingTargExtended'][i] = None\n if self.movingTargToTrack[i] is not None:\n self.info['movingTargToTrack'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargToTrack, 'non-sidereal moving target')))\n else:\n self.info['movingTargToTrack'][i] = None\n if self.convolveExtended is True:\n self.info['convolveExtended'] = [True] * n_exposures",
"def match_source_blend_isochrones(params,source,blend,log):\n\n if 'none' in str(params['isochrone_file']).lower():\n log.info('No input file with isochrone data provided, skipping isochrone analysis.')\n\n else:\n log.info('\\n')\n log.info('Analysing isochrones for source star\\n')\n star_data = isochrone_utilities.analyze_isochrones(source.gr_0,source.ri_0,\n params['isochrone_file'],\n log=log)\n source.mass = star_data[0]\n source.sig_mass = star_data[1]\n source.teff = star_data[2]\n source.sig_teff = star_data[3]\n source.logg = star_data[4]\n source.sig_logg = star_data[5]\n source.estimate_luminosity_class(log=log)\n\n log.info('\\n')\n log.info('Analysing isochrones for blend\\n')\n\n star_data = isochrone_utilities.analyze_isochrones(blend.gr_0,blend.ri_0,\n params['isochrone_file'],\n log=log)\n blend.mass = star_data[0]\n blend.sig_mass = star_data[1]\n blend.teff = star_data[2]\n blend.sig_teff = star_data[3]\n blend.logg = star_data[4]\n blend.sig_logg = star_data[5]\n blend.estimate_luminosity_class(log=log)\n\n return source, blend",
"def sources(obj, reftype):",
"def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):\n inspec = None\n inspecz = np.nan\n inspecmag = np.nan\n inspecpb = None\n\n source_table_file = os.path.join('sources', 'sourcetable.txt')\n source_table_file = io.get_pkgfile(source_table_file)\n source_table = at.Table.read(source_table_file, format='ascii')\n ind = (source_table['specname'] == source)\n nmatch = len(source_table['specname'][ind])\n if nmatch == 1:\n # load the file and the info\n inspec = source_table['specname'][ind][0]\n inspecz = source_table['redshift'][ind][0]\n inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag\n elif nmatch == 0:\n message = 'Spectrum {} not listed in lookup table'.format(source)\n pass\n else:\n message = 'Spectrum {} not uniquely listed in lookup table'.format(source)\n pass\n\n if inspec is None:\n warnings.warn(message, RuntimeWarning)\n inspec = source\n inspecz = sourcez\n inspecmag = sourcemag\n inspecpb = sourcepb\n\n if not os.path.exists(inspec):\n message = 'Spectrum {} could not be found'.format(inspec)\n raise ValueError(message)\n\n try:\n spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')\n except Exception as e:\n message = 'Could not read file {}'.format(source)\n raise ValueError(message)\n\n if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):\n pass\n else:\n pbs = passband.load_pbs([inspecpb], 0.)\n try:\n inspecpb = pbs[inspecpb][0]\n except KeyError as e:\n message = 'Could not load passband {}'.format(inspecpb)\n raise RuntimeError(message)\n\n try:\n inspecmag = float(inspecmag)\n except (TypeError, ValueError) as e:\n message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)\n raise ValueError(message)\n\n try:\n inspecz = float(inspecz)\n except (TypeError, ValueError) as e:\n message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)\n raise ValueError(message)\n\n if inspecz < 0 :\n message = 'Source must have positive definite cosmological redshift'\n raise ValueError(message)\n\n inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')\n try:\n inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)\n inspec.convert('flam')\n except Exception as e:\n message = 'Could not renormalize spectrum {}'.format(inspec)\n raise RuntimeError(message)\n\n if inspecz > 0:\n zblue = 1./(1+inspecz) - 1.\n inspec_rest = inspec.redshift(zblue)\n inspec_rest.convert('flam')\n c = default_cosmology.get()\n mu = c.distmod(inspecz)\n out = inspec_rest*(10.**(0.4*mu.value))\n else:\n out = inspec\n # TODO renorm is basic and just calculates dmag = RNval - what the original spectrum's mag is\n # and renormalizes - there's some sanity checking for overlaps\n # we can do this without using it and relying on the .passband routines\n return out",
"def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))",
"def separateSource(self,compInfo):\n sourceInfo = {}\n source = []\n for eachline in compInfo:\n words = eachline.split() ##This line need to be confirmed with Manas\n if eachline[0] in ['f', 'h']:\n source.append(words[3])\n if len(source) > 0:\n for eachline in compInfo:\n words_s = eachline.split()\n if words_s[0] in source:\n sourceInfo[words_s[0]] = words_s[1:3]\n return sourceInfo",
"def main():\n\n (options, args) = parse_options(sys.argv)\n\n iterator = GFFParser.GFFAddingIterator() \n examiner = GFFParser.GFFExaminer()\n\n exon_map = dict()\n\n id_dict = examiner.available_limits(options.anno)['gff_id']\n intron_lists = dict()\n\n ### collect all available sources from gff-file\n source_dict = examiner.available_limits(options.anno)['gff_source_type']\n taken_sources = set()\n #types = ['gene', 'mRNA', 'exon', 'CDS']\n types = ['exon']\n\n ### parse only for exons and let the GFFparser \n ### infer the respective parents (otherwise doubled entries occured)\n ### we sanitize the structure later on anyways\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### try different type, if sources are empty \n if len(taken_sources) == 0:\n types = ['CDS']\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### print taken_sources\n if len(taken_sources) == 0:\n print >> sys.stderr, 'No suitable sources found!'\n sys.exit(-1)\n\n ### only show available sources - if neccessary\n if options.show_sources:\n print 'Parsed file %s\\n' % options.anno\n print 'Following sources are available:\\n'\n for source in taken_sources:\n print source \n print '\\nUse option -s to specify a comma-separated list of sources (-s source1,source2,source3), otherwise all sources are taken'\n sys.exit(0)\n\n if options.sources != '':\n user_sources = set(options.sources.split(','))\n taken_sources = taken_sources.intersection(user_sources)\n if len(taken_sources) == 0:\n print >> sys.stderr, 'The specified sources do not match any of the available sources - Please use option -S to get a list of available sources'\n sys.exit(-1)\n\n if options.verbose:\n print \"take sources %s\" % str(list(taken_sources))\n\n ### build up gff-parsing filter\n gff_sources = []\n for source in taken_sources:\n gff_sources.extend(zip([source] * len(types), types))\n\n ### parse gff-file\n for idx in id_dict.keys():\n print 'parsing chromosome %s' % idx\n if len(gff_sources) > 0:\n trans_dict = iterator.get_all_features(options.anno, {'gff_source_type':gff_sources, 'gff_id':idx})\n else:\n trans_dict = iterator.get_all_features(options.anno, {'gff_id':idx})\n ### since we parse only one chromosome, this loop is evaluated only once\n for chrm in trans_dict.keys():\n ### verify/sanitize the created dictionairy\n fix_structure(trans_dict[chrm])\n intron_lists[chrm] = dict()\n for gene in trans_dict[chrm].features:\n for trans in gene.sub_features:\n if trans.type == 'exon':\n print \"WARNING: Exon on transcript level:\"\n print trans\n print 'will continue\\n'\n continue\n elif len(trans.sub_features) > 1: ### at least two exons for one intron ...\n strand = trans.sub_features[0].strand\n contig_list = [(trans.sub_features[i].location.nofuzzy_start, trans.sub_features[i].location.nofuzzy_end) for i in range(len(trans.sub_features))]\n contig_list.sort(lambda u, v:u[0]-v[0])\n for exon in range(len(contig_list) - 1):\n ### update intron lists\n if contig_list[exon][1] - contig_list[exon + 1][0] == 0:\n continue\n try:\n assert(contig_list[exon][1] < contig_list[exon + 1][0])\n except AssertionError:\n print >> sys.stderr, 'exon_1 %i, exon_2 %i' % (contig_list[exon][1], contig_list[exon + 1][0]) \n print >> sys.stderr, contig_list[exon]\n print >> sys.stderr, contig_list[exon+1]\n print >> sys.stderr, exon\n sys.exit(-1)\n ### for now strand information is only dummy\n intron_lists[chrm][(0, 
contig_list[exon][1], contig_list[exon + 1][0])] = strand\n \n ### update exon map\n for exon in range(len(contig_list)):\n if not exon_map.has_key(chrm):\n exon_map[chrm] = dict()\n\n if not exon_map[chrm].has_key(trans.id):\n exon_map[chrm][trans.id] = dict()\n ### we assume, that an exon cannot occurr twice in the same transcript!\n ### the value in the dict is a binary encoding, if the left/right end is intronic 10 = 2 means, 5' end is intronic\n if len(contig_list) == 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 0 ### 00 -> should never occurr\n elif exon == 0:\n exon_map[chrm][trans.id][contig_list[exon]] = 2 ### 10\n elif exon == len(contig_list) - 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 1 ### 01\n else:\n exon_map[chrm][trans.id][contig_list[exon]] = 3 ### 11 \n\n outfile = open(options.outfile, 'w')\n cPickle.dump(intron_lists, outfile)\n outfile.close()\n \n outfile = open(options.outfile + '.' + 'cov', 'w')\n cPickle.dump(exon_map, outfile)\n outfile.close()",
"def __init__(self, experiment_dir, main_conf_map, prep_conf_map, *args, **kwargs):\n self.experiment_dir = experiment_dir\n\n self.det_name = None\n self.roi = None\n try:\n self.scan_range = [int(s) for s in main_conf_map.scan.split('-')]\n # single scan or multiple scans will be given as range\n if len(self.scan_range) == 1:\n self.scan_range.append(self.scan_range[0])\n scan_end = self.scan_range[-1]\n except:\n print(\"scans not defined in main config\")\n self.scan_range = None\n\n if self.scan_range is not None:\n try:\n specfile = main_conf_map.specfile.strip()\n # parse det name and saved roi from spec\n self.det_name, self.roi = get_det_from_spec(specfile, scan_end)\n if self.det_name is not None and self.det_name.endswith(':'):\n self.det_name = self.det_name[:-1]\n except AttributeError:\n print(\"specfile not configured\")\n except:\n print(\"exception parsing spec file\")\n\n # detector name from configuration will override the one paesed from spec file\n try:\n self.det_name = prep_conf_map.detector\n except:\n if self.det_name is None:\n # default detector get_frame method just reads tif files and doesn't do anything to them.\n print('Detector name is not available, using default detector class')\n self.det_name = \"default\"\n\n # if roi is set in config file use it, just in case spec had it wrong or it's not there.\n try:\n self.roi = prep_conf_map.roi\n except:\n pass\n\n try:\n self.separate_scans = prep_conf_map.separate_scans\n except:\n self.separate_scans = False\n\n try:\n self.Imult = prep_conf_map.Imult\n except:\n self.Imult = None\n\n try:\n self.min_files = self.prep_map.min_files\n except:\n self.min_files = 0\n try:\n self.exclude_scans = self.prep_map.exclude_scans\n except:\n self.exclude_scans = []",
"def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1",
"def __init__(self, x=0, y=0, flux=None, time=None, wcs=None, quality=None, mask=None, exposure=1800, sector=0,\n size=150,\n camera=1, ccd=1, cadence=None):\n super(Source, self).__init__()\n if cadence is None:\n cadence = []\n if quality is None:\n quality = []\n if wcs is None:\n wcs = []\n if time is None:\n time = []\n if flux is None:\n flux = []\n\n self.size = size\n self.sector = sector\n self.camera = camera\n self.ccd = ccd\n self.cadence = cadence\n self.quality = quality\n self.exposure = exposure\n self.wcs = wcs\n co1 = 38.5\n co2 = 116.5\n catalog_1 = self.search_gaia(x, y, co1, co1)\n catalog_2 = self.search_gaia(x, y, co1, co2)\n catalog_3 = self.search_gaia(x, y, co2, co1)\n catalog_4 = self.search_gaia(x, y, co2, co2)\n catalogdata = vstack([catalog_1, catalog_2, catalog_3, catalog_4], join_type='exact')\n catalogdata = unique(catalogdata, keys='DESIGNATION')\n coord = wcs.pixel_to_world([x + (size - 1) / 2 + 44], [y + (size - 1) / 2])[0].to_string()\n ra = float(coord.split()[0])\n dec = float(coord.split()[1])\n catalogdata_tic = tic_advanced_search_position_rows(ra=ra, dec=dec, radius=(self.size + 2) * 21 * 0.707 / 3600)\n # print(f'no_of_stars={len(catalogdata_tic)}, camera={camera}, ccd={ccd}: ra={ra}, dec={dec}, radius={(self.size + 2) * 21 * 0.707 / 3600}')\n self.tic = convert_gaia_id(catalogdata_tic)\n self.flux = flux[:, y:y + size, x:x + size]\n self.mask = mask[y:y + size, x:x + size]\n self.time = np.array(time)\n median_time = np.median(self.time)\n interval = (median_time - 388.5) / 365.25\n\n num_gaia = len(catalogdata)\n tic_id = np.zeros(num_gaia)\n x_gaia = np.zeros(num_gaia)\n y_gaia = np.zeros(num_gaia)\n tess_mag = np.zeros(num_gaia)\n in_frame = [True] * num_gaia\n for i, designation in enumerate(catalogdata['DESIGNATION']):\n ra = catalogdata['ra'][i]\n dec = catalogdata['dec'][i]\n if not np.isnan(catalogdata['pmra'].mask[i]): # masked?\n ra += catalogdata['pmra'][i] * np.cos(np.deg2rad(dec)) * interval / 1000 / 3600\n if not np.isnan(catalogdata['pmdec'].mask[i]):\n dec += catalogdata['pmdec'][i] * interval / 1000 / 3600\n pixel = self.wcs.all_world2pix(\n np.array([catalogdata['ra'][i], catalogdata['dec'][i]]).reshape((1, 2)), 0, quiet=True)\n x_gaia[i] = pixel[0][0] - x - 44\n y_gaia[i] = pixel[0][1] - y\n try:\n tic_id[i] = catalogdata_tic['ID'][np.where(catalogdata_tic['GAIA'] == designation.split()[2])[0][0]]\n except:\n tic_id[i] = np.nan\n if np.isnan(catalogdata['phot_g_mean_mag'][i]):\n in_frame[i] = False\n elif catalogdata['phot_g_mean_mag'][i] >= 25:\n in_frame[i] = False\n elif -4 < x_gaia[i] < self.size + 3 and -4 < y_gaia[i] < self.size + 3:\n dif = catalogdata['phot_bp_mean_mag'][i] - catalogdata['phot_rp_mean_mag'][i]\n tess_mag[i] = catalogdata['phot_g_mean_mag'][\n i] - 0.00522555 * dif ** 3 + 0.0891337 * dif ** 2 - 0.633923 * dif + 0.0324473\n if np.isnan(tess_mag[i]):\n tess_mag[i] = catalogdata['phot_g_mean_mag'][i] - 0.430\n if np.isnan(tess_mag[i]):\n in_frame[i] = False\n else:\n in_frame[i] = False\n\n tess_flux = 10 ** (- tess_mag / 2.5)\n t = Table()\n t[f'tess_mag'] = tess_mag[in_frame]\n t[f'tess_flux'] = tess_flux[in_frame]\n t[f'tess_flux_ratio'] = tess_flux[in_frame] / np.nanmax(tess_flux[in_frame])\n t[f'sector_{self.sector}_x'] = x_gaia[in_frame]\n t[f'sector_{self.sector}_y'] = y_gaia[in_frame]\n catalogdata = hstack([catalogdata[in_frame], t]) # TODO: sorting not sorting all columns\n catalogdata.sort('tess_mag')\n self.gaia = catalogdata",
"def add_reffile_overrides(self):\n all_obs_info, unique_obs_info = self.info_for_all_observations()\n\n # Add empty placeholders for reference file entries\n empty_col = np.array([' ' * 500] * len(self.info['Instrument']))\n superbias_arr = deepcopy(empty_col)\n linearity_arr = deepcopy(empty_col)\n saturation_arr = deepcopy(empty_col)\n gain_arr = deepcopy(empty_col)\n distortion_arr = deepcopy(empty_col)\n photom_arr = deepcopy(empty_col)\n ipc_arr = deepcopy(empty_col)\n transmission_arr = deepcopy(empty_col)\n badpixmask_arr = deepcopy(empty_col)\n pixelflat_arr = deepcopy(empty_col)\n\n # Loop over combinations, create metadata dict, and get reffiles\n for status in unique_obs_info:\n updated_status = deepcopy(status)\n (instrument, detector, filtername, pupilname, readpattern, exptype) = status\n\n if instrument == 'FGS':\n if detector in ['G1', 'G2']:\n detector = detector.replace('G', 'GUIDER')\n updated_status = (instrument, detector, filtername, pupilname, readpattern, exptype)\n\n # If the user entered reference files in self.reffile_defaults\n # use those over what comes from the CRDS query\n #sbias, lin, sat, gainfile, dist, ipcfile, pam = self.reffiles_from_dict(status)\n manual_reffiles = self.reffiles_from_dict(updated_status)\n for key in manual_reffiles:\n if manual_reffiles[key] == 'none':\n manual_reffiles[key] = 'crds'\n\n # Identify entries in the original list that use this combination\n match = [i for i, item in enumerate(all_obs_info) if item==status]\n\n # Populate the reference file names for the matching entries\n superbias_arr[match] = manual_reffiles['superbias']\n linearity_arr[match] = manual_reffiles['linearity']\n saturation_arr[match] = manual_reffiles['saturation']\n gain_arr[match] = manual_reffiles['gain']\n distortion_arr[match] = manual_reffiles['distortion']\n photom_arr[match] = manual_reffiles['photom']\n ipc_arr[match] = manual_reffiles['ipc']\n transmission_arr[match] = manual_reffiles['transmission']\n badpixmask_arr[match] = manual_reffiles['badpixmask']\n pixelflat_arr[match] = manual_reffiles['pixelflat']\n\n self.info['superbias'] = list(superbias_arr)\n self.info['linearity'] = list(linearity_arr)\n self.info['saturation'] = list(saturation_arr)\n self.info['gain'] = list(gain_arr)\n self.info['astrometric'] = list(distortion_arr)\n self.info['photom'] = list(photom_arr)\n self.info['ipc'] = list(ipc_arr)\n self.info['transmission'] = list(transmission_arr)\n self.info['badpixmask'] = list(badpixmask_arr)\n self.info['pixelflat'] = list(pixelflat_arr)",
"def testUsedFlag(self):\n self.exposure.setWcs(self.tanWcs)\n config = AstrometryTask.ConfigClass()\n config.wcsFitter.order = 2\n config.wcsFitter.numRejIter = 0\n\n sourceSchema = afwTable.SourceTable.makeMinimalSchema()\n measBase.SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema\n # schema must be passed to the solver task constructor\n solver = AstrometryTask(config=config, refObjLoader=self.refObjLoader, schema=sourceSchema)\n sourceCat = self.makeSourceCat(self.tanWcs, sourceSchema=sourceSchema)\n\n results = solver.run(\n sourceCat=sourceCat,\n exposure=self.exposure,\n )\n # check that the used flag is set the right number of times\n count = 0\n for source in sourceCat:\n if source.get('calib_astrometry_used'):\n count += 1\n self.assertEqual(count, len(results.matches))",
"def get_diffuse_sources(self, src_sel):\n extended = self._select_and_freeze(self.extended_sources, src_sel)\n for s in extended: # this seems redundant, but was necessary\n s.model.free[:] = False if src_sel.frozen(s) else s.free[:]\n sources.validate(s,self.nside, None)\n s.smodel = s.model\n \n return self.get_global_sources(src_sel.skydir()), extended",
"def _get_setup_from_noiseSFTs(self):\n SFTConstraint = self._get_sft_constraints_from_tstart_duration()\n noise_multi_sft_catalog = lalpulsar.GetMultiSFTCatalogView(\n lalpulsar.SFTdataFind(self.noiseSFTs, SFTConstraint)\n )\n if noise_multi_sft_catalog.length == 0:\n raise IOError(\"Got empty SFT catalog.\")\n\n # Information to be extracted from the SFTs themselves\n IFOs = []\n tstart = []\n tend = []\n Tsft = []\n self.sftfilenames = [] # This refers to the MFD output!\n\n for ifo_catalog in noise_multi_sft_catalog.data:\n ifo_name = lalpulsar.ListIFOsInCatalog(ifo_catalog).data[0]\n\n time_stamps = lalpulsar.TimestampsFromSFTCatalog(ifo_catalog)\n this_Tsft = int(round(1.0 / ifo_catalog.data[0].header.deltaF))\n this_start_time = time_stamps.data[0].gpsSeconds\n this_end_time = time_stamps.data[-1].gpsSeconds + this_Tsft\n\n self.sftfilenames.append(\n utils.get_official_sft_filename(\n ifo_name,\n time_stamps.length, # ifo_catalog.length fails for NB case\n this_Tsft,\n this_start_time,\n this_end_time - this_start_time,\n self.label,\n )\n )\n\n IFOs.append(ifo_name)\n tstart.append(this_start_time)\n tend.append(this_end_time)\n Tsft.append(this_Tsft)\n\n # Get the \"overall\" values of the search\n Tsft = np.unique(Tsft)\n if len(Tsft) != 1:\n raise ValueError(f\"SFTs contain different basetimes: {Tsft}\")\n if Tsft[0] != self.Tsft:\n logger.warning(\n f\"Overwriting self.Tsft={self.Tsft}\"\n f\" with value {Tsft[0]} read from noiseSFTs.\"\n )\n self.Tsft = Tsft[0]\n self.tstart = min(tstart)\n self.duration = max(tend) - self.tstart\n self.detectors = \",\".join(IFOs)",
"def _init_good_paper(self):\n self.good_paper = [False] * len(self.src_raw)\n for i in range(len(self.src_raw)):\n # if any(pattern in ' '.join(self.src_raw[i]) for pattern in self.good_patterns):\n if any(pattern.lower() in ' '.join(self.src_raw[i]).lower() for pattern in self.good_patterns):\n self.good_paper[i] = True",
"def _load_sources(self):\n ss_dir = SteelScriptDir('AppResponse', 'files')\n\n for svc in [PACKETS_REPORT_SERVICE_NAME,\n GENERAL_REPORT_SERVICE_NAME]:\n svc_version = self.appresponse.versions[svc]\n sw_version = (self.appresponse.get_info()['sw_version']\n .replace(' ', ''))\n sources_filename = ('{}-sources-{}-{}.pcl'\n .format(svc, svc_version, sw_version))\n sources_file = ss_dir.get_data(sources_filename)\n\n sources_file.read()\n\n if not sources_file.data:\n svcdef = self.appresponse.find_service(svc)\n\n # sources is a list of dictionaries\n sources = svcdef.bind('sources').execute('get').data['items']\n\n # the whole set of sources for current service\n all_sources = {}\n\n for source in sources:\n cols = source['columns']\n source['columns'] = \\\n OrderedDict(sorted(zip(map(lambda x: x['id'], cols),\n cols)))\n source['filters_on_metrics'] = \\\n source['capabilities']['filters_on_metrics']\n if 'granularities' not in source:\n source['granularities'] = None\n\n all_sources[source['name']] = source\n\n if source['name'] in report_source_to_groups:\n self._sources[source['name']] = source\n\n # source_file writes the whole set of sources to disk\n sources_file.data = all_sources\n sources_file.write()\n logger.debug(\"Wrote sources data into {}\"\n .format(sources_filename))\n else:\n logger.debug(\"Loading sources data from {}\"\n .format(sources_filename))\n # Only load valid sources based on settings\n for k, v in sources_file.data.iteritems():\n if k in report_source_to_groups:\n self._sources[k] = v\n\n return",
"def extract_sources(self, doc):\n self.log.info(\"Extracting sources for %s\" % doc)\n\n sources_added = 0\n\n for u in doc.utterances:\n if u.entity.person:\n p = u.entity.person\n\n s = DocumentSource()\n s.person = p\n s.affiliation = p.affiliation\n s.quoted = True\n s.unnamed = False\n\n if doc.add_source(s):\n sources_added += 1\n\n self.log.info(\"Added %d sources for %s\" % (sources_added, doc))"
] | [
"0.56265473",
"0.55381423",
"0.55253875",
"0.5407288",
"0.53591067",
"0.53485554",
"0.53367805",
"0.5311908",
"0.5266865",
"0.52260643",
"0.5216051",
"0.5157914",
"0.5119476",
"0.507221",
"0.5060244",
"0.50464916",
"0.5040846",
"0.50221545",
"0.50190264",
"0.5007729",
"0.5004638",
"0.4990731",
"0.49883717",
"0.49851885",
"0.49493897",
"0.49393192",
"0.49342254",
"0.49256486",
"0.49006352",
"0.4880991"
] | 0.69685996 | 0 |
Checks that the Drake-created flavor of nlopt.cpp (via a patch file) is consistent with the upstream-generated flavor of same (via CMake). If this test fails during an NLopt version pin upgrade, you will need to update patches/gen_enums.patch with the reported differences. | def test_enum_cross_check(self):
# Load both input files.
# "actual" refers to the the Drake-created flavor (via a patch file).
# "expected" refers to the upstream-generated flavor (via CMake).
manifest = runfiles.Create()
actual_file = manifest.Rlocation(
"nlopt_internal/genrule/nlopt.hpp")
with open(actual_file) as f:
actual = f.read()
expected_file = manifest.Rlocation(
"drake/tools/workspace/nlopt_internal/test/nlopt-upstream.hpp")
with open(expected_file) as f:
expected = f.read()
# When CMake is processing the header file, it removes blank lines.
# We will do the same to our actual file to prep for comparison.
actual = actual.replace("\n\n", "\n")
# CMake also does something inexplicable to tab-spaced macro line
# endings. Canonicalize those in both files for comparison.
actual = re.sub(r'\s+\\', r' \\', actual)
expected = re.sub(r'\s+\\', r' \\', expected)
# Compare.
self.assertMultiLineEqual(expected, actual) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check(self, expected):\n versions = ['3.0', '4.0', '5.0', '6.0', '7.0', '8.0']\n modes = ['strict', 'normal', 'ignore']\n\n for version in versions:\n for mode in modes:\n assert self.get(app_version=version, compat_mode=mode) == (\n expected['-'.join([version, mode])])",
"def test_new_overrides_git(self):\n for n in range(4, 6):\n for prefix in ['', 'git-']:\n cmd = '%scmd%d' % (prefix, n)\n parser = GbpOptionParser(cmd)\n actual = parser.config['new_overrides_git_option1']\n expected = 'new_overrides_git_value1'\n self.assertEqual(actual, expected, \"%s != %s for %s\" % (actual, expected, cmd))",
"def test_valid_min_cppstd_from_outdated_settings(cppstd):\n conanfile = _create_conanfile(\"gcc\", \"9\", \"Linux\", cppstd, \"libstdc++\")\n assert not valid_min_cppstd(conanfile, \"17\", False)",
"def test_good(self):\n expected = {\n '0.1.0': rpm_version('0.1.0', '1'),\n '0.1.0-99-g3d644b1': rpm_version('0.1.0', '1.99.g3d644b1'),\n '0.1.1pre1': rpm_version('0.1.1', '0.pre.1'),\n '0.1.1': rpm_version('0.1.1', '1'),\n '0.2.0dev1': rpm_version('0.2.0', '0.dev.1'),\n '0.2.0dev2-99-g3d644b1':\n rpm_version('0.2.0', '0.dev.2.99.g3d644b1'),\n '0.2.0dev3-100-g3d644b2-dirty': rpm_version(\n '0.2.0', '0.dev.3.100.g3d644b2.dirty'),\n }\n unexpected_results = []\n for supplied_version, expected_rpm_version in expected.items():\n actual_rpm_version = make_rpm_version(supplied_version)\n if actual_rpm_version != expected_rpm_version:\n unexpected_results.append((\n supplied_version,\n actual_rpm_version,\n expected_rpm_version\n ))\n\n if unexpected_results:\n self.fail(unexpected_results)",
"def test_check_min_cppstd_from_outdated_settings(cppstd):\n conanfile = _create_conanfile(\"gcc\", \"9\", \"Linux\", cppstd, \"libstdc++\")\n with pytest.raises(ConanInvalidConfiguration) as exc:\n check_min_cppstd(conanfile, \"17\", False)\n assert \"Current cppstd ({}) is lower than the required C++ standard (17).\" \\\n \"\".format(cppstd) == str(exc.value)",
"def test_draftN_format_checker(self):\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft202012_format_checker # noqa\n\n self.assertIs(\n draft202012_format_checker,\n validators.Draft202012Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft202012_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft201909_format_checker # noqa\n\n self.assertIs(\n draft201909_format_checker,\n validators.Draft201909Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft201909_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft7_format_checker # noqa\n\n self.assertIs(\n draft7_format_checker,\n validators.Draft7Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft7_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft6_format_checker # noqa\n\n self.assertIs(\n draft6_format_checker,\n validators.Draft6Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft6_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft4_format_checker # noqa\n\n self.assertIs(\n draft4_format_checker,\n validators.Draft4Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft4_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft3_format_checker # noqa\n\n self.assertIs(\n draft3_format_checker,\n validators.Draft3Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft3_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertRaises(ImportError):\n from asdf._jsonschema import draft1234_format_checker # noqa",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def py_versiontest(c):\n pass",
"def test_options_with_choices_changing(self) -> None:\n testdir = Path(os.path.join(self.unit_test_dir, '83 change option choices'))\n options1 = str(testdir / 'meson_options.1.txt')\n options2 = str(testdir / 'meson_options.2.txt')\n\n # Test that old options are changed to the new defaults if they are not valid\n real_options = str(testdir / 'meson_options.txt')\n self.addCleanup(os.unlink, real_options)\n\n shutil.copy(options1, real_options)\n self.init(str(testdir))\n self.mac_ci_delay()\n shutil.copy(options2, real_options)\n\n self.build()\n opts = self.introspect('--buildoptions')\n for item in opts:\n if item['name'] == 'combo':\n self.assertEqual(item['value'], 'b')\n self.assertEqual(item['choices'], ['b', 'c', 'd'])\n elif item['name'] == 'array':\n self.assertEqual(item['value'], ['b'])\n self.assertEqual(item['choices'], ['b', 'c', 'd'])\n\n self.wipe()\n self.mac_ci_delay()\n\n # When the old options are valid they should remain\n shutil.copy(options1, real_options)\n self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])\n self.mac_ci_delay()\n shutil.copy(options2, real_options)\n self.build()\n opts = self.introspect('--buildoptions')\n for item in opts:\n if item['name'] == 'combo':\n self.assertEqual(item['value'], 'c')\n self.assertEqual(item['choices'], ['b', 'c', 'd'])\n elif item['name'] == 'array':\n self.assertEqual(item['value'], ['b', 'c'])\n self.assertEqual(item['choices'], ['b', 'c', 'd'])",
"def test_patch_hyperflex_hxdp_version(self):\n pass",
"def test_get_cons3rt_version(self):\n pass",
"def check_pkg_consistency():\n pass",
"def test_get_short_version(self):\n pass",
"def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]",
"def test_single_git_override_disabled_deprecations(self):\n for prefix in ['', 'git-']:\n os.environ['GBP_DISABLE_SECTION_DEPRECTATION'] = 'true'\n parser = GbpOptionParser('%scmd2' % prefix)\n self.assertEqual(parser.config['single_git_override_option1'], 'single_git_override_value1')\n for line in range(0, 2):\n self._check_log_empty()\n os.environ.pop('GBP_DISABLE_SECTION_DEPRECTATION')",
"def test_higher_version_always_preferred(self):\n try:\n self.prepare()\n self.assertEquals((1, 2, 4), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)",
"def test_usedforsecurity_flag_behavior(self) -> None:\n for version, expected in {\n self.sys_v3_8: (True, 'md5'),\n self.sys_v3_9: (False, 'md5'),\n self.sys_v4_8: (False, 'md5'),\n }.items():\n assert _attempt_init_of_python_3_9_hash_object(self.fake_md5, version) == expected",
"def test_pynast_suported_version(self):\r\n min_acceptable_version = (1, 2)\r\n max_acceptable_version = (1, 2, 2)\r\n try:\r\n from pynast import __version__ as pynast_lib_version\r\n version = pynast_lib_version.split('.')\r\n if version[-1][-4:] == '-dev':\r\n version[-1] = version[-1][:-4]\r\n version = tuple(map(int, version))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(pynast_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n\r\n min_version_str = '.'.join(map(str, min_acceptable_version))\r\n max_version_str = '.'.join(map(str, max_acceptable_version))\r\n error_msg = (\"Unsupported pynast version. Must be >= %s and <= %s, \"\r\n \"but running %s.\" % (min_version_str, max_version_str,\r\n version_string))\r\n self.assertTrue(pass_test, error_msg)",
"def test_patch_hyperflex_software_version_policy(self):\n pass",
"def test_git_py2py3_fresh_nodeps_ignore_pairs_without_common_versions(\n self):\n fake_results = self.success_data + self.pairs_without_common_versions\n self.fake_store.save_compatibility_statuses(fake_results)\n package_name = 'git+git://github.com/google/api-core.git'\n self.assertImageResponseGithub(package_name)\n self.assertTargetResponse(package_name, 'py2', 'py3')",
"def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))",
"def test_patch_namespaced_build_config(self):\n pass",
"def test_pypi_py2py3_fresh_nodeps_ignore_pairs_without_common_versions(\n self):\n fake_results = self.success_data + self.pairs_without_common_versions\n self.fake_store.save_compatibility_statuses(fake_results)\n package_name = 'google-api-core'\n self.assertImageResponsePyPI(package_name)\n self.assertTargetResponse(package_name, 'py2', 'py3')",
"def version_check(self):\n # anchor_matcher --> matcher\n if hasattr(self, \"anchor_matcher\"):\n self.matcher = self.anchor_matcher\n if hasattr(self, \"head_in_features\"):\n self.in_features = self.head_in_features\n if hasattr(self, \"test_topk_candidates\"):\n self.topk_candidates = self.test_topk_candidates\n if hasattr(self, \"test_score_thresh\"):\n self.score_threshold = self.test_score_thresh",
"def test_bug_2247(self):\n\n code, out, err = self.t(\"rc.color:0 add test\")\n self.assertIn(\"Configuration override\", err)\n\n # Once rc.verbose:nothing is set, no output about configuration overrides should appear\n code, out, err = self.t(\"rc.verbose:nothing add test\")\n self.assertNotIn(\"Configuration override\", err)\n\n code, out, err = self.t(\"rc.color:0 rc.verbose:nothing add test\")\n self.assertNotIn(\"Configuration override\", err)\n\n code, out, err = self.t(\"rc.verbose:nothing rc.color:0 add test\")\n self.assertNotIn(\"Configuration override\", err)",
"def test_0_opts(self):\n\n # Test that bad options return expected exit code.\n self.pkgrecv(command=\"--newest\", exit=2)\n self.pkgrecv(self.durl1, \"-!\", exit=2)\n self.pkgrecv(self.durl1, \"-p foo\", exit=2)\n self.pkgrecv(self.durl1, \"-d {0} [email protected]\".format(self.tempdir),\n exit=1)\n self.pkgrecv(self.durl1, \"-d {0} [email protected]\".format(\n self.tempdir), exit=1)\n\n # Test help.\n self.pkgrecv(command=\"-h\", exit=0)\n\n # Verify that pkgrecv requires a destination repository.\n self.pkgrecv(self.durl1, \"'*'\", exit=2)\n\n # Verify that a non-existent repository results in failure.\n npath = os.path.join(self.test_root, \"nochance\")\n self.pkgrecv(self.durl1, \"-d file://{0} foo\".format(npath), exit=1)\n\n # Test list newest.\n self.pkgrecv(self.durl1, \"--newest\")\n output = self.reduceSpaces(self.output)\n \n def _nobuild_fmri(pfmri):\n return fmri.PkgFmri(pfmri).get_fmri(\n include_build=False)\n\n # The latest version of amber and bronze should be listed\n # (sans publisher prefix currently).\n amber = _nobuild_fmri(self.published[1])\n scheme = _nobuild_fmri(self.published[8])\n bronze = _nobuild_fmri(self.published[4])\n tree = _nobuild_fmri(self.published[5])\n branch = _nobuild_fmri(self.published[6])\n leaf = _nobuild_fmri(self.published[7])\n\n expected = \"\\n\".join((amber, branch, bronze, leaf, scheme, tree)) + \"\\n\"\n self.assertEqualDiff(expected, output)",
"def test_versionComponents(self):\n self.assertEqual(\n (int, int, int),\n tuple(\n type(info) for info\n in [nevow.version.major, nevow.version.minor, nevow.version.micro]))",
"def test_deprecated(self):\n def new_deprecated():\n return cfg.DeprecatedOpt(uuid.uuid4().hex, group=uuid.uuid4().hex)\n\n opt_names = ['service-type', 'valid-interfaces', 'endpoint-override']\n depr = dict([(n, [new_deprecated()]) for n in opt_names])\n opts = loading.get_adapter_conf_options(deprecated_opts=depr)\n\n for opt in opts:\n if opt.name in opt_names:\n self.assertIn(depr[opt.name][0], opt.deprecated_opts)",
"def test_bug_19_23_at_plone_org(self):\n import quintagroup.seoptimizer\n try:\n zcml.load_config('overrides.zcml', quintagroup.seoptimizer)\n except IOError:\n self.fail(\"overrides.zcml removed from the package root\")",
"def test_checkFlags(self):\n self.failUnlessEqual(self.nice.opts['aflag'], 1)\n self.failUnlessEqual(self.nice.opts['flout'], 0)"
] | [
"0.56607723",
"0.5562484",
"0.54389495",
"0.5402845",
"0.53964126",
"0.5389693",
"0.53053814",
"0.5245198",
"0.523824",
"0.5220819",
"0.5130013",
"0.50907856",
"0.5084268",
"0.507624",
"0.50242937",
"0.5022775",
"0.5013908",
"0.5011498",
"0.50096434",
"0.49897176",
"0.49723658",
"0.49635184",
"0.49633497",
"0.49589193",
"0.4958425",
"0.49544773",
"0.49384037",
"0.49110904",
"0.489085",
"0.48830795"
] | 0.6848094 | 0 |
Evaluate the given distribution function at the point(s) (r, ppar, pperp). Use the vector 'v' to specify the parameters of this distribution function. | def Eval(self, r, ppar, pperp, v, gamma=None, p2=None, p=None, xi=None):
while False:
yield None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _p_v_at_r(self, v, r):\n if hasattr(self, \"_logfQ_interp\"):\n return (\n numpy.exp(\n self._logfQ_interp(\n -_evaluatePotentials(self._pot, r, 0) - 0.5 * v**2.0\n )\n )\n * v**2.0\n )\n else:\n return (\n self.fQ(-_evaluatePotentials(self._pot, r, 0) - 0.5 * v**2.0)\n * v**2.0\n )",
"def prob_V(self, V_array, mu, sd, v):\n from scipy.integrate import quad\n step = lambda x: 0.5 * (np.sign(x) + 1) # Heaviside step function\n red = lambda V: (V - mu) / sd # reduced voltage\n P_integrand = lambda u: step(u - red(self.V_r)) * np.exp(u**2) # integrand\n \n low = red(V_r)\n up = (self.theta - mu) / sd\n integral = quad(P_integrand, low, up)[0]\n \n P_V_array = 2 * v * self.tau_m * 1e-3 / sd * np.exp(- ((V_array - self.E_L) - mu)**2 / sd**2) * integral\n return step(-(V_array - self.E_L) + self.theta) * P_V_array",
"def perp_vector(p, q, r):\n v = cross(q - r, q - p)\n return v / mod(v) + q",
"def evaluate(self,p):\n if not self.initialized: self.__initialize__()\n if self.vp0: p_ = 1-p\n else: p_ = p\n if self.ids_to_consider is None:\n #sum on all parametrized cell\n cf = np.sum(self.V[self.p_ids-1]*p_)/self.V_tot - self.max_v_frac\n else:\n cf = np.sum((self.V[self.ids_to_consider-1]*p_))/self.V_tot - self.max_v_frac\n return cf",
"def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )",
"def pareto_distribution(v, p=0.8):\n thr = np.sum(v)*p\n cumsum = 0\n for i, _v in enumerate(v, 1):\n cumsum += _v\n if cumsum >= thr:\n return i * 1.0 / len(v)",
"def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...",
"def f_v(self, v):\n\n return self.f(v[:, 0], v[:, 1], v[:, 2])",
"def pdf(self, x, **kwargs):\n from scipy.stats import rv_continuous\n return self.rvdist.pdf(x, **kwargs) if rv_continuous in self.rvdist.__class__.__mro__ \\\n else self.rvdist.evaluate(x, **kwargs)",
"def PoissonPDF(v):\n from scipy.special import gamma\n\n a = 3.24174\n b = 3.24269\n c = 1.26861\n g = gamma(a / c)\n k1 = c * b ** (a / c) / g\n pdf = k1 * np.power(v, (a - 1)) * np.exp(- b * np.power(v, c))\n return pdf",
"def evaluate_one(self, x):\n # p = 1. / (np.sqrt(2. * np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - x) * self.invvar * (self.mean - x))\n p = self.dist.probability(x)\n return p",
"def pdfPR(self,x,p,r):\n\t\treturn gammaF(x + r) / (factorial(x)*gammaF(r)) * p**r * (1-p)**x",
"def perron_term_fn(iteration_count, v, z):\n return -0.5 * z * (v + iteration_count - 0.5) / (\n (v + z + (iteration_count - 1.) / 2.) *\n (v + z + iteration_count / 2.))",
"def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v",
"def fun_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2))",
"def pdf(self, point: np.ndarray) -> float:\n return self._probs.dot([rv.pdf(point) for rv in self._rvs])",
"def __call__(self, x, **kwargs):\n if len(kwargs) > 0:\n self.update(**kwargs)\n p = np.atleast_2d(np.zeros_like(x))\n a, b = self.get_args(x[...,0])\n p[...,1] = self.distribution.pdf(x[...,1], a, b, loc=self.loc(x[...,0]), scale=self.scale(x[...,0]))\n with np.errstate(invalid='ignore'):\n p[...,1] = np.log(p[...,1])\n return p",
"def Distribution_Local_galaxy_DM(self, dmv, vpar):\n val = vpar[0] * np.exp(-np.power((dmv - vpar[1]) / vpar[2], 2.)) \\\n + vpar[3] * np.exp(-np.power((dmv - vpar[4]) / vpar[5], 2.))\n return val",
"def fun_exp_p_gauss(p,r):\n return p[1] * np.exp(-((r**2/p[0]))) + p[3] * np.exp(-((np.abs(r)/p[2])))",
"def v(x):\n return x*x",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def fitness(self):\n params = np.array([self['p{}'.format(i)] for i in range(n_pars)])\n \n return func(params)",
"def semiparametric_probability_function(self, index, eval_locs=None):\n\n index0 = index[self.endog == 0].values\n index1 = index[self.endog == 1].values\n\n if eval_locs is None:\n eval_locs = index.values\n f0_leave_one_out_locs = index[self.endog == 0].index.values\n f1_leave_one_out_locs = index[self.endog == 1].index.values\n f0_other_locs = f1_leave_one_out_locs\n f1_other_locs = f0_leave_one_out_locs\n else:\n f0_leave_one_out_locs = np.array([], dtype=np.int64)\n f1_leave_one_out_locs = np.array([], dtype=np.int64)\n f0_other_locs = np.arange(len(eval_locs))\n f1_other_locs = np.arange(len(eval_locs))\n\n # Density estimates conditional on the outcome.\n f0 = self.f_s(\n index=eval_locs,\n index_s=index0,\n leave_one_out_locs=f0_leave_one_out_locs,\n other_locs=f0_other_locs\n )\n f1 = self.f_s(\n index=eval_locs,\n index_s=index1,\n leave_one_out_locs=f1_leave_one_out_locs,\n other_locs=f1_other_locs\n )\n\n Δ0 = self.Δ(f=f0, s=0)\n Δ1 = self.Δ(f=f1, s=1)\n return (f1 + Δ1) / (f0 + f1 + Δ0 + Δ1)",
"def v_from_p_function(self):\r\n\r\n track_c = [] # p classical function,\r\n for i in range(len(self.dt.momentum_t)):\r\n track_c.append(self.dt.momentum_t[i] / self.dt.mass)\r\n\r\n (fig, ax) = plt.subplots()\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n track_c,\r\n linestyle=':',\r\n linewidth=1,\r\n color='b',\r\n label='classic',\r\n )\r\n\r\n # marker=\"+\", markersize = 13,\r\n # ax.plot(self.dt.momentum_t, self.observer.velT, linestyle=\" \",\r\n # color=\"k\",marker=\"+\", markersize = 13, label=\"measurement\")\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vel_t,\r\n linestyle=' ',\r\n color='k',\r\n marker='o',\r\n label='result of measurements',\r\n )\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vel_anl,\r\n linestyle='-',\r\n color='red',\r\n linewidth=1,\r\n label='continuum',\r\n )\r\n\r\n # Euler's method == analitical function. We not plot it.\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vn,\r\n linestyle='--',\r\n color='blue',\r\n marker='x',\r\n linewidth=1,\r\n label=\"Euler's method\",\r\n )\r\n\r\n # error\r\n\r\n ax.errorbar(self.dt.momentum_t, self.dt.vel_t, fmt='k ',\r\n yerr=self.dt.vel_t_err)\r\n\r\n xm = -1.0\r\n for i in range(len(self.dt.momentum_t)):\r\n if self.dt.momentum_t[i] > xm:\r\n xm = self.dt.momentum_t[i]\r\n stepx = round(xm / float(len(self.dt.momentum_t)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0, xm]) # xm = 0.85\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('p')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx) # step on x is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # line draw\r\n\r\n line = matplotlib.lines.Line2D([0.0, 9.0], [1.0, 1.0], color='b'\r\n )\r\n ax.add_line(line)\r\n plt.text(0.7, 1.01, u'light speed', horizontalalignment='center'\r\n )\r\n ax.set_ylim([0, 1.1])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('v')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=0.1) # step on y is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n # pylab.show()\r\n\r\n plt.show()",
"def eval(self, *x, sigma=False):\n\n y = evalPol(self.pol, *x)\n if sigma: return y, np.sqrt(evalPol(self.covpol, *x))\n return y",
"def fun_exp_p_exp(p,r):\n return p[1] * np.exp(-((np.abs(r)/p[0]))) + p[3] * np.exp(-((np.abs(r)/p[2])))",
"def vanilla_call_price(S, K, r, v, T):\n return S * norm_cdf(d_j(1, S, K, r, v, T)) - \\\n K*exp(-r*T) * norm_cdf(d_j(2, S, K, r, v, T))",
"def rvs(self, *args):\n if self.method == 'spline':\n rvsValue = self._distribution.inverseCdf(random(),random())\n # if no transformation, then return the coordinate for the original input parameters\n # if there is a transformation, then return the coordinate in the reduced space\n elif self.method == 'pca':\n if self.transformation:\n rvsValue = self._distribution.coordinateInTransformedSpace(self.rank)\n else:\n coordinate = self._distribution.coordinateInTransformedSpace(self.rank)\n rvsValue = self._distribution.coordinateInverseTransformed(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'rvs is not yet implemented for ' + self.method + ' method')\n return rvsValue",
"def v_p(self, psi_l, ci):\n\t\treturn min((ci*self.VPMAX0)/(ci + self.KP), self.VPR)",
"def fgausbg(v,p):\n return np.exp(-0.5 * ((v[0] - p[0]) / p[1])**2) * p[2] + p[3]"
] | [
"0.66941005",
"0.6040724",
"0.5935693",
"0.5931339",
"0.5931324",
"0.5880862",
"0.5767878",
"0.5735554",
"0.56684875",
"0.5650999",
"0.5604907",
"0.5558572",
"0.5557449",
"0.5551832",
"0.55289036",
"0.55191976",
"0.5493359",
"0.54931647",
"0.54723114",
"0.5459315",
"0.5458212",
"0.54018533",
"0.5392716",
"0.53828293",
"0.5369348",
"0.5364296",
"0.5364022",
"0.5352086",
"0.5305013",
"0.5301392"
] | 0.6704118 | 0 |
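Note on the row above: the `Eval` document cell only fixes the call signature (its body is an abstract placeholder). As a point of reference, a minimal sketch of one possible concrete distribution with the same interface — a bi-Maxwellian whose parameter vector `v` is assumed to be `(n, wpar, wperp)`, i.e. density and parallel/perpendicular thermal momenta — might look like the following. The class name, the meaning of `v`, and the choice of distribution are illustrative assumptions, not part of the dataset entry.

```python
import numpy as np

class BiMaxwellian:
    """Illustrative distribution sharing the Eval signature from the row above.

    Assumption: v = (n, wpar, wperp) holds the density and the parallel /
    perpendicular thermal momenta. This is a sketch of the interface in use,
    not the dataset's own (abstract) implementation.
    """

    def Eval(self, r, ppar, pperp, v, gamma=None, p2=None, p=None, xi=None):
        n, wpar, wperp = v
        # Normalised so that the integral over momentum space gives n.
        norm = n / (np.pi ** 1.5 * wpar * wperp ** 2)
        return norm * np.exp(-(ppar / wpar) ** 2 - (pperp / wperp) ** 2)


# Evaluate on a small (ppar, pperp) grid at r = 0 with unit parameters.
ppar, pperp = np.meshgrid(np.linspace(-3, 3, 5), np.linspace(0, 3, 4))
f = BiMaxwellian().Eval(0.0, ppar, pperp, v=(1.0, 1.0, 1.0))
print(f.shape)   # (4, 5)
```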
Calculate phase autocorrelation for each signal in seismic_stream | def acorr(seismic_signal, **kwargs):
# if seismic signal is a trace object, we pack it to a stream
if isinstance(seismic_signal, _tr.Trace):
sources = _st.Stream([seismic_signal])
else:
sources = seismic_signal
if not isinstance(sources, _st.Stream):
raise TypeError('seismic_stream is not a Stream nor Trace object')
return _st.Stream([_acorr_trace(tr, **kwargs) for tr in sources]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step_autocorrelation(self):\n\n max_hops = max([len(x) for x in self.steps])\n\n self.acf = np.zeros([len(self.steps), max_hops])\n\n keep = [] # list to hold indices of trajectories with a non-zero amount of hops\n for i in range(len(self.steps)):\n hops = self.steps[i]\n if len(hops) > 1:\n self.acf[i, :len(self.steps[i])] = timeseries.acf(self.steps[i])\n keep.append(i)\n\n self.acf = self.acf[keep, :]\n\n self.acf = np.array([self.acf[np.nonzero(self.acf[:, i]), i].mean() for i in range(max_hops)])\n\n #self.acf = timeseries.step_autocorrelation(self.z_interpolated.T[..., np.newaxis])",
"def autocorrelation(x):\n x = np.asarray(x)\n N = len(x)\n x = x-x.mean()\n s = fft.fft(x, N*2-1)\n result = np.real(fft.ifft(s * np.conjugate(s), N*2-1))\n result = result[:N]\n result /= result[0]\n return result",
"def phase_reconstruct(input_mag):\n\n print(\"Reconstructing phase information...\")\n # Initialize random phases\n phase = 2 * np.pi * np.random.random_sample(input_mag.shape) - np.pi\n for i in range(500):\n # Compute spectrogram\n spectrogram = input_mag * np.exp(1j*phase)\n # Inverse stft to get signal from mag info and imperfect phase info\n temp_signal = librosa.istft(spectrogram, hop_length=1024, center=False)\n # Recover some meaningful phase info\n phase = np.angle(librosa.stft(temp_signal, hop_length=1024, \n n_fft=4096, center=False))\n\n if i % 25 == 0:\n print(str(round(i*.2, 2)) + \"% complete\")\n \n return phase",
"def autocorrelation(df,maxt,step,vari,acquisiton_time,division_time):\n maxt = int(maxt/acquisiton_time)\n step = int(step/acquisiton_time)\n df = connect_cells(df,vari)\n return np.vstack([correlation(df,Dt,vari) for Dt in\\\n np.arange(0,maxt,step)]),\\\n np.arange(0,maxt,step)*acquisiton_time/division_time",
"def autoc(array):\r\n return ifft2(np.square(np.abs(fft2(array))))",
"def autocorrFFT(x):\n\n N = len(x)\n F = np.fft.fft(x, n=2*N) # 2*N because of zero-padding\n PSD = F * F.conjugate()\n res = np.fft.ifft(PSD)\n res = (res[:N]).real # now we have the autocorrelation in convention B\n n = N*np.ones(N) - np.arange(0, N) # divide res(m) by (N-m)\n\n return res / n # this is the autocorrelation in convention A",
"def to_acf(self):\n rho = 0.5*np.fft.irfft(self) / self.delta_t\n return AutoCovariance(rho, delta_t=self.delta_t)",
"def autocorr(sig):\n return float(np.correlate(sig, sig))",
"def _compute_acf(values_in_series):\n\n autocorrelation_by_lag = numpy.correlate(\n values_in_series, values_in_series, mode='same')\n\n # Remove negative lags.\n lag_0_index = numpy.argmax(autocorrelation_by_lag)\n autocorrelation_by_lag = autocorrelation_by_lag[lag_0_index:]\n lags = numpy.linspace(\n 0, len(autocorrelation_by_lag) - 1, num=len(autocorrelation_by_lag),\n dtype=int)\n\n # Divide by num points used to compute each autocorrelation.\n num_points_by_lag = len(values_in_series) - lags\n autocorrelation_by_lag = autocorrelation_by_lag / num_points_by_lag\n\n # Normalize so that lag-0 autocorrelation is 1 (true by definition).\n autocorrelation_by_lag = autocorrelation_by_lag / autocorrelation_by_lag[0]\n\n return autocorrelation_by_lag, lags",
"def step_autocorrelation(trajectories, axis=0):\n\n try:\n if len(axis) == 1:\n axis = axis[0]\n except TypeError:\n pass\n\n ntraj = trajectories.shape[1] # number of particles with a trajectory\n\n # calculate acf of first trajectory in order to determine size of output array. timeseries.acf will truncate\n # the array slightly in order to make the FFT efficient\n ACF = acf(trajectories[1:, 0, axis] - trajectories[:-1, 0, axis])\n acfs = np.zeros([ntraj, ACF.size])\n acfs[0, :] = ACF\n\n keep = []\n for t in range(1, ntraj):\n steps = trajectories[1:, t, axis] - trajectories[:-1, t, axis]\n if not np.all(steps == 0):\n acfs[t, :] = acf(steps)\n keep.append(t)\n #acfs[t, :] = acf(trajectories[:ACF.size, t, axis])\n\n return acfs[keep, :]",
"def autocorrelation(x, nlags = 0):\n return [x.corr(x.shift(lag)) for lag in range(nlags + 1)]",
"def autocorrelation(x):\n x = (x - np.mean(x)) / (np.std(x) * np.sqrt(len(x)))\n result = np.correlate(x, x, mode='full')\n return result[int(result.size / 2):]",
"def autocorrelation(x):\n x = (x - np.mean(x)) / (np.std(x) * np.sqrt(len(x)))\n result = np.correlate(x, x, mode='full')\n return result[int(result.size / 2):]",
"def _calculate_autocorrelations(self):\n\n self._autocorr_real_x = self.__calculate_autocorr(self._noise_field_real, self._n_x, self._n_y, 'x')\n self._autocorr_real_y = self.__calculate_autocorr(self._noise_field_real, self._n_y, self._n_x, 'y')\n self._autocorr_imag_x = self.__calculate_autocorr(self._noise_field_imag, self._n_x, self._n_y, 'x')\n self._autocorr_imag_y = self.__calculate_autocorr(self._noise_field_imag, self._n_y, self._n_x, 'y')",
"def calculate(Freq, f1, f2, stream, \n seconds_E, seconds_P, seconds_S, \n dt, dt_P, dt_S, Settings, dist, Acoda=None):#azBA=0):\n \n # save original trace\n original_trace = stream[0].copy()\n T0 = original_trace.stats.starttime\n \n sr = float(\"%.0f\" % original_trace.stats.sampling_rate) # 100 or 20 Hz normally\n \n if sr == 50.:\n if Settings[\"verbose\"]:\n print(\"\\nLets not use Irkut-24 digitizer data\")\n return\n \n #===\n # according to [Aki 1980] [Pavlenko 2008] [etc.]\n # нужно брать коду за фиксированныое время - например 40 с после Т0 (время в очаге)\n # однако для расстояний > 70 км, 40 сек будет раньше времени 2*Ts !\n \n #===\n # time of Coda start\n dt_Coda = dt + Acoda\n \n # how much is coda start and end in seconds\n # cut Coda from Coda1 to Coda2 - -10s back -- +10s forward (if SD == 20)\n Coda1, Coda2 = Acoda - SD/2, Acoda + SD/2\n \n # time of coda start \n dt_Coda1 = dt + Coda1\n # and End\n dt_Coda2 = dt + Coda2\n \n # check, maybe A100 < 2*Ts, then skip it\n if Acoda < (seconds_S * 2):\n if Settings[\"verbose\"]:\n print(\"\\nA`%d` amplitude is too earlier then `%.0f`!\" % (Acoda, Coda1))\n # exit only if diff > 10-15 s, cause we can use anyway 1.5*Ts\n if Acoda < (seconds_S * 2 + 10):\n return\n \n # calc SD1 manually (if not set)\n # or use fixed length for P- and S-waves\n SD1 = Settings['sd1']\n if SD1 == 0:\n # window for Direct body-waves = 0.25 * Ts\n SD1 = int( math.ceil(0.25 * seconds_S) )\n\n # P-wave window is half of S-window\n SD1P = int(math.ceil(SD1 / 2))\n \n # check: S-window and Coda-window may intersects!\n if (Coda1 - seconds_S) < SD1:\n if Settings[\"verbose\"]:\n print(\"\\nWindow S and Coda intersects for event `%s`!\" % dt)\n return\n \n #===\n # save resulting values here\n result = []\n \n # filtering\n filter_params = {'freqmin': f1, 'freqmax': f2, 'type': 'bandpass', \n 'corners': Settings[\"corners\"], 'zerophase': True}\n # Butterworth filter in place\n stream.filter(**filter_params)\n \n if Settings[\"component\"] == \"H\":\n # get ?N and ?E channels\n __N = stream.select(channel='?HN')[0].data\n __E = stream.select(channel='?HE')[0].data\n # its Hilbert transform\n hilbN = scipy.fftpack.hilbert(__N)\n hilbE = scipy.fftpack.hilbert(__E)\n # calc resulting components\n __N = np.power(__N, 2) + np.power(hilbN, 2)\n __N = np.power(__N, 0.5)\n \n __E = np.power(__E, 2) + np.power(hilbE, 2)\n __E = np.power(__E, 0.5)\n \n # and it's Average over two channels (result of calculations)\n RMS_component = np.mean( np.array([ __N, __E ]), axis=0 )\n # remove 2 components\n for _ in range(2): stream.pop(0)\n stream[0].data = RMS_component\n elif Settings[\"component\"] in (\"N\", \"E\", \"Z\"):# use just 1 channel\n # channel data\n __Z = stream.select(channel='?HZ')[0].data\n # its Hilbert transform\n hilbZ = scipy.fftpack.hilbert(__Z)\n # resulting envelope\n __Z = np.power(__Z, 2) + np.power(hilbZ, 2)\n # square root from it\n stream[0].data = np.power(__Z, 0.5)\n stream[0].stats.channel = \"RMS\"\n \n #=== start calculating\n # for Qp we use only Z-channel!!! for Qs - selected...\n assert stream.count() == 1, \"\\nMust be 1 trace! 
we have: %s\" % stream\n \n trace = stream[0]\n \n # check SNR on filtered signal!\n SNR, RMS_NOISE = calc_signal_noise_ratio(trace, sr, dt, dt_Coda2, 3)# 3 sec\n \n # save SNR\n result += [SNR]\n \n if SNR < Settings['minsnr']:\n if Settings[\"verbose\"]:\n print(\"\\nSNR for freq %s is too SMALL (%.1f)!...\" % (Freq, SNR))\n return result\n \n #===================================================================\n #=== Calculations\n \n # get envelope around 40 (or 100) second, aorund Acoda\n envelope_part = trace.slice(starttime=dt_Coda1, endtime=dt_Coda2)\n envelope_times = envelope_part.times() + Coda1\n \n # calc A100 value\n # just mean value like Eulenfeld does: tr.data = tr.data / np.mean(sl.data)\n y100 = np.mean(envelope_part.data) # mean value\n y100_RMS = RMS(envelope_part.data) # RMS\n \n #===\n # get filtered parts of data\n \n # P-window data\n # check that P-window and S-window do not intersects!\n if (seconds_S - seconds_P) <= SD1P:\n if Settings[\"verbose\"]:\n print(\"\\nWindow S and P intersects for %s!\" % dt)\n Pwindow = None\n else:\n Pwindow = stream.slice(dt_P, dt_P+SD1P)[0].data\n \n # S-window data\n Swindow = stream.slice(dt_S, dt_S+SD1)[0].data\n \n #===========================================================================\n # *** Crucial place for calculations ***\n \n # result for P-bulk-window\n P_window_result = get_value_for_window(Pwindow, maximum=Settings[\"max\"])\n # S-window\n S_window_result = get_value_for_window(Swindow, maximum=Settings[\"max\"])\n \n #TODO: remove coda_value, we calc RMS(Acoda), A100, A40 etc\n coda_value = 0#get_value_for_window(coda_window, maximum=Settings[\"max\"])\n \n # save results +A100 +slope koeff\n result += [P_window_result, S_window_result, coda_value, y100, y100_RMS]\n \n #===========================================================================\n #=== calculations Ended\n \n \n #===========================================================================\n # Move all plotting issues here, move out of calculating code\n #===========================================================================\n \n if PLOT:\n # make Figure\n fig, (ax1, ax2) = plt.subplots(figsize=(12, 7), nrows=2, ncols=1, sharex=True)#, dpi=200)\n \n EVENT_NAME = \"Dist = {:.0f} km | Stream {}--{} | Freq {} Hz ({}-{})\".format(dist, \n stream[0].stats.starttime, stream[0].stats.endtime, Freq, f1, f2)\n fig.suptitle(EVENT_NAME, fontsize=FONTSIZE)\n \n # plot original signal\n ax1.plot(original_trace.times(), np.abs(original_trace.data), \n color=\"grey\", lw=.5, zorder=111, label=\"NT.%s.00.BHN+BHE\" % STATION)\n # plot filtered signal\n ax1.plot(trace.times(), trace.data, \"b\", lw=1., zorder=222, label=\"RMS-envelope\")\n \n # plot filtered\n #labelFiltered = 'Фильтр {}-{} Гц'.format(f1, f2)\n ax2.semilogy(trace.times(), trace.data, \"k\", lw=0.5, zorder=222, alpha=0.75)#label=labelFiltered, \n \n # mark time of Event, P and S time by vertical lines, start -- end\n for ax in (ax1, ax2):\n ax.axvline(x=seconds_E, linestyle=\"--\", color=\"y\", lw=2.) # Event\n ax.axvline(x=seconds_P, linestyle=\"--\", color=\"b\", lw=0.5) # P\n ax.axvline(x=seconds_P+SD1P, linestyle=\"--\", color=\"b\", lw=0.5) # P+SD1p\n # S-window\n ax.axvline(x=seconds_S, linestyle=\"--\", color=\"k\", lw=1) # S\n ax.axvline(x=seconds_S+SD1, linestyle=\"--\", color=\"k\", lw=1) # S+SD1\n # mark coda\n ax.axvline(x=Coda1, linestyle=\"--\", color=\"r\", lw=2.) 
# coda\n ax.axvline(x=Coda2, linestyle=\"--\", color=\"r\", lw=2) # coda\n # mark noise RMS_NOISE\n ax.axhline(y=RMS_NOISE, linestyle=\"--\", color=\"c\", lw=.5) # noiz\n \n ax1.plot(Acoda, y100, \"om\", markersize=7, markeredgecolor=\"k\", zorder=9999)\n ax2.plot(Acoda, y100, \"om\", markersize=7, markeredgecolor=\"k\", zorder=9999)\n \n # mark result for S-window\n ax2.plot(seconds_S, S_window_result, \"om\", markersize=7, \n markeredgecolor=\"k\", zorder=999)\n \n # plot ec. details\n y_pos = trace.data.max() / 2\n \n #=== selected windows\n # P\n P_time = trace.times()[:Pwindow.size] + seconds_P\n ax2.plot(P_time, Pwindow, \"-r\", lw=1.)\n \n # plot S-window (Swindow)\n S_time = trace.times()[:Swindow.size] + seconds_S\n ax2.plot(S_time, Swindow, \"-r\", lw=1.)\n \n # Coda (envelope) on second axis\n ax2.plot(envelope_times, envelope_part.data, \"-r\", lw=1.)\n \n # plot SNR value text\n ax1.text(x=seconds_E+0.5, y=y_pos, s=\"%.1f\" % SNR) # SNR\n \n # maximum Coda amplitude\n ax2.text(x=Coda1, y=y100_RMS, s=\"%.1f\" % y100_RMS) #coda MAX\n \n # axis limits\n ax2.set_xlim(seconds_E - 5, Coda2 + 5)\n # y\n ax1.set_ylim(-5, original_trace.data.max())\n ax2.set_ylim(None, trace.data.max()*1.5)\n \n # legend\n ax1.legend(loc=\"best\")\n #ax2.legend(loc=\"best\")#upper right\n \n plt.tight_layout()\n \n if Settings[\"show\"]:\n plt.show()\n else:\n # just save Figure\n save_to_dir = os.path.join(\"img\", STATION, str(stream[0].stats.starttime)[:19].replace(\":\", \"-\"))\n if not os.path.exists(save_to_dir): os.makedirs(save_to_dir)\n outfilename = os.path.join(save_to_dir, \"{}_{}_{}__{}s.png\".format(STATION, Settings[\"component\"], Freq, SD))\n plt.savefig(outfilename)\n\n # the end\n return result",
"def acor_fn(x):\n n = len(x)\n f = np.fft.fft(x-np.mean(x), n=2*n)\n acf = np.fft.ifft(f * np.conjugate(f))[:n].real\n return acf / acf[0]",
"def acor_fn(x):\n n = len(x)\n f = np.fft.fft(x-np.mean(x), n=2*n)\n acf = np.fft.ifft(f * np.conjugate(f))[:n].real\n return acf / acf[0]",
"def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase",
"def autocorr(se, depth=21):\r\n dfa = pd.DataFrame(se)\r\n for i in range(depth):\r\n dfa['z{0}'.format(i + 1)] = se.shift(i + 1)\r\n cr = dfa.corr()\r\n cr.index = range(depth + 1)\r\n return cr[se.name]",
"def autocorrelate(x):\n\n global i1, i2\n # used for transposes\n t = roll(range(x.ndim), 1)\n\n # pairs of indexes\n # the first is for the autocorrelation array\n # the second is the shift\n ii = [list(enumerate(range(1, s - 1))) for s in x.shape]\n\n # initialize the resulting autocorrelation array\n acor = empty(shape=[len(s0) for s0 in ii])\n\n # iterate over all combinations of directional shifts\n for i in product(*ii):\n # extract the indexes for\n # the autocorrelation array\n # and original array respectively\n i1, i2 = asarray(i).T\n\n x1 = x.copy()\n x2 = x.copy()\n\n for i0 in i2:\n # clip the unshifted array at the end\n x1 = x1[:-i0]\n # and the shifted array at the beginning\n x2 = x2[i0:]\n\n # prepare to do the same for\n # the next axis\n x1 = x1.transpose(t)\n x2 = x2.transpose(t)\n\n # normalize shifted and unshifted arrays\n x1 -= x1.mean()\n x1 /= x1.std()\n x2 -= x2.mean()\n x2 /= x2.std()\n\n # compute the autocorrelation directly\n # from the definition\n acor[tuple(i1)] = (x1 * x2).mean()\n\n return acor",
"def _phase_detect(acc_z):\n acc_mag_sd = pd.Series(acc_z).rolling(100).std(center=True)\n min_sd = 1.5\n mov = np.where(acc_mag_sd >= min_sd)[0]\n phase = np.zeros(len(acc_z)).astype(int)\n phase[mov] = 1\n\n return phase",
"def chisqdata_cphase_fft(Obsdata, Prior, fft_pad_frac=1):\n clphasearr = Obsdata.c_phases(mode=\"all\", count=\"min\")\n uv1 = np.hstack((clphasearr['u1'].reshape(-1,1), clphasearr['v1'].reshape(-1,1)))\n uv2 = np.hstack((clphasearr['u2'].reshape(-1,1), clphasearr['v2'].reshape(-1,1)))\n uv3 = np.hstack((clphasearr['u3'].reshape(-1,1), clphasearr['v3'].reshape(-1,1)))\n clphase = clphasearr['cphase']\n sigma = clphasearr['sigmacp']\n\n npad = fft_pad_frac * np.max((Prior.xdim, Prior.ydim))\n\n im_info = (Prior.xdim, Prior.ydim, npad, Prior.psize, Prior.pulse)\n\n A = (im_info, [uv1, uv2, uv3])\n\n return (clphase, sigma, A)",
"def global_phase_correction(imgs_shifted_ft):\n nangles = imgs_shifted_ft.shape[0]\n phase_corrections = np.zeros((nangles))\n\n # todo: should weight by SNR, or something like this\n for ii in range(nangles):\n phase_corrections[ii] = np.angle(np.sum(imgs_shifted_ft[ii, 0] * imgs_shifted_ft[ii, 1].conj()))\n\n return phase_corrections",
"def autocorrelation_1d(data):\n\n N = len(data)\n n_fft = select_power_of_two(N)\n\n # Pad the signal with zeros to avoid the periodic images.\n\n R_data = np.zeros(2*n_fft)\n R_data[:N] = data\n\n F_data = np.fft.fft(R_data)\n\n result = np.fft.ifft(F_data*F_data.conj())[:N].real/(N-np.arange(N))\n\n return result[:N]",
"def acf(t, largest_prime=500):\n\n T = np.array(t)\n\n # Don't allow a prime factor larger than 'largest_prime'. Truncate data until that condition is met\n l = 2 * T.shape[0] - 1\n\n while largest_prime_factor(l) >= largest_prime or l % 2 == 0:\n l -= 1\n\n T = T[:(l + 1) // 2, ...] # '...' allows for no second dimension if only a single time series is analysed\n length = T.shape[0] * 2 - 1\n\n T -= np.mean(T, axis=0)\n\n fftx = np.fft.fft(T, n=length, axis=0)\n ret = np.fft.ifft(fftx * np.conjugate(fftx), axis=0)\n ret = np.fft.fftshift(ret, axes=(0,))\n\n autocorr_fxn = ret[length // 2:].real\n\n if len(autocorr_fxn.shape) > 1:\n autocorr_fxn /= np.arange(T.shape[0], 0, -1)[:, None]\n else:\n autocorr_fxn /= np.arange(T.shape[0], 0, -1)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n autocorr_fxn /= np.var(T, axis=0)\n\n return autocorr_fxn # normalized",
"def autocorr_imseq(stack):\n def autocorr(x):\n x = (x-x.mean()) / x.std()\n result = np.correlate(x, x, mode='full')/len(x)\n return result[len(result)//2:]\n \n# samples = []\n# for num, i in seq.iterrows():\n# X, Y, I = corrLib.divide_windows(io.imread(i.Dir), windowsize=[50, 50], step=300)\n# samples.append(I)\n# stack = np.stack(samples)\n r = stack.reshape((stack.shape[0], stack.shape[1]*stack.shape[2])).transpose()\n ac_list = []\n for x in r:\n ac = autocorr(x)\n ac_list.append(ac)\n ac_stack = np.stack(ac_list)\n ac_mean = ac_stack.mean(axis=0)\n return ac_mean",
"def autocorr(self, x):\n X = rfft(x, n=(x.shape[1]*2-1), axis=1)\n xr = irfft(X * X.conjugate(), axis=1).real\n xr = fftshift(xr, axes=1)\n xr = xr.sum(axis=1)\n return xr",
"def PSD_to_ACF(freq, psd, lags):\n freq_sym = np.append(-freq[::-1], freq) \n psd_sym = np.append(psd[::-1], psd)\n\n steps = freq_sym[1:] - freq_sym[:-1]\n height = psd_sym[1:]\n\n # nd = np.tile(freq_sym[1:], (len(lags), 1)).T\n nd = np.tile(freq_sym, (len(lags), 1)).T\n\n # acf = np.cos(-2*np.pi*nd*lags)*(height*steps)[:, np.newaxis]\n # acf = acf.sum(axis=0)\n\n acf = scipy.integrate.simps(np.cos(-2*np.pi*nd*lags)*psd_sym[:, np.newaxis], freq_sym, axis=0)\n\n # for l in lags:\n # ac = scipy.integrate.simps(np.cos(-2*np*pi*freq_sym*l) * psd_sym, freq_sym)\n return acf",
"def series_autocorr(series, lag=1):\n op = DataFrameCorr(other=series.shift(lag), method=\"pearson\")\n return op(series)",
"def plotting_autocorr(dataframe):\n plot_acf(dataframe['STU'].iloc[1:], lags=40)\n plt.show()"
] | [
"0.6106281",
"0.5915008",
"0.5743236",
"0.56838435",
"0.56832486",
"0.5655305",
"0.56056494",
"0.55680525",
"0.5545803",
"0.55240375",
"0.55095017",
"0.54883564",
"0.54883564",
"0.5412182",
"0.5382874",
"0.5338562",
"0.5338562",
"0.52966046",
"0.5273439",
"0.52382296",
"0.5204852",
"0.51955986",
"0.5178602",
"0.5165725",
"0.51600236",
"0.51586246",
"0.5157804",
"0.5145917",
"0.51408833",
"0.50973666"
] | 0.5928678 | 1 |
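Note on the row above: the `acorr` wrapper defers the actual computation to `phasecorr.acorr`, whose definition is not part of this entry. Below is a hedged, self-contained sketch of what a phase autocorrelation of a single 1-D trace can look like, in the spirit of Schimmel-style phase correlation on the instantaneous phase of the analytic signal — an assumption about what `phasecorr` computes, not a statement about it.

```python
import numpy as np
from scipy.signal import hilbert

def phase_autocorr(x, max_lag):
    """Phase autocorrelation of a 1-D signal (sketch, not phasecorr.acorr)."""
    # Unit-amplitude analytic signal: keep only the instantaneous phase.
    phi = np.exp(1j * np.angle(hilbert(np.asarray(x, dtype=float))))
    n = phi.size
    pac = np.empty(max_lag + 1)
    for lag in range(max_lag + 1):
        a, b = phi[: n - lag], phi[lag:]
        # Schimmel-type phase stack: 1 for phase agreement, -1 for opposition.
        pac[lag] = np.sum(np.abs(a + b) - np.abs(a - b)) / (2 * (n - lag))
    return pac

# Example: the PAC of a noisy sine stays close to 1 near multiples of its period.
t = np.linspace(0.0, 10.0, 2000)
x = np.sin(2 * np.pi * 1.5 * t) + 0.3 * np.random.randn(t.size)
print(phase_autocorr(x, 50)[:5])
```

The ObsPy wrapper in the row above would then simply apply such a routine trace by trace and repack the results into a Stream.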
Calculate the phase cross-correlation (pcc) between signal1 and signal2. For this purpose, signal2 is shifted in time and compared to the corresponding portion of signal1. | def _xcorr_trace(signal1, signal2, **kwargs):
kwargs['mode'] = 'pcc'
kwargs['lags'] = __default_lags_if_not_set(signal1, signal2, **kwargs)
pcc_signal = phasecorr.xcorr(signal1.data, signal2.data, **kwargs)
trace = _tr.Trace(data=pcc_signal)
__writeheader(trace, signal1, **kwargs)
return trace | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cphase(h1, h2):\n\n for h in (h1, h2):\n h.assert_ket_space()\n\n field = h1.base_field\n\n d = h1.dim()\n if h2.dim() != d:\n raise HilbertError('spaces must be of the same dimension')\n\n ret = (h1*h2).O.array()\n for (j, a) in enumerate(h1.index_iter()):\n for (k, b) in enumerate(h2.index_iter()):\n ret[{ h1: a, h1.H: a, h2: b, h2.H: b }] = field.fractional_phase(j*k, d)\n return ret",
"def cross_correlation(values1, values2, lags=100):\n lags, corr, line, x = pl.xcorr( values1, values2, maxlags=lags, usevlines=False, marker=None)\n return lags, corr",
"def fft_crosscov(im1, im2):\n fft1_conj = np.conj(np.fft.fft2(im1))\n fft2 = np.fft.fft2(im2)\n normalize = abs(fft2*fft1_conj)\n normalize[normalize == 0] = 1 # prevent divide by zero error\n cross_power_spectrum = (fft2*fft1_conj)/normalize\n crosscov = np.fft.ifft2(cross_power_spectrum)\n crosscov = np.real(crosscov)\n return fft_shift(crosscov)",
"def cross_correlation(field1, field2):\n array_len = len(field1)\n # Take the index of the largest value in the array of correlation values calculated via a full convolve\n # cross correlation.\n arg_max = np.argmax((np.correlate([float(i) for i in field1], [float(i) for i in field2], mode='full')))\n # Map the index of the largest correlation value to that of the season lag between metrics\n return -(int(np.arange(-array_len+1, array_len)[arg_max]))",
"def circular_cross_correlation(x, y):\n return tf.real(tf.ifft(tf.multiply(tf.conj(tf.fft(tf.cast(x, tf.complex64))) , tf.fft(tf.cast(y, tf.complex64)))))",
"def correlation_1d(data1, data2):\n\n N = len(data1)\n assert N == len(data2)\n n_fft = select_power_of_two(N)\n\n # Pad the signal with zeros to avoid the periodic images.\n R_data1 = np.zeros(2*n_fft)\n R_data1[:N] = data1\n R_data2 = np.zeros(2*n_fft)\n R_data2[:N] = data2\n F_data1 = np.fft.fft(R_data1)\n F_data2 = np.fft.fft(R_data2)\n result = np.fft.ifft(F_data1.conj()*F_data2)\n positive_time = result[:N].real/(N-np.arange(N))\n negative_time = result[-N+1:][::-1].real/(N-1-np.arange(N-1))\n\n return np.concatenate((negative_time[::-1], positive_time))",
"def test_coherence_phase_delay():\r\n\r\n # Set up two time-series with a known phase delay:\r\n nz = np.random.rand(t.shape[-1])\r\n x = np.sin(t) + nz\r\n y = np.sin(t + np.pi) + nz\r\n\r\n tseries = np.vstack([x, y])\r\n for method in methods:\r\n f1, pdelay = tsa.coherency_phase_spectrum(tseries, csd_method=method)\r\n f2, tdelay = tsa.coherency_phase_delay(tseries, csd_method=method)\r\n npt.assert_almost_equal(pdelay[0, 1], -pdelay[1, 0])\r\n npt.assert_almost_equal(tdelay[0, 1], -tdelay[1, 0])\r\n # This is the relationship between these two quantities:\r\n npt.assert_almost_equal(tdelay[0, 1],\r\n pdelay[0, 1][1:] / (2 * np.pi * f2))",
"def phase_dist(phi1,phi2=None):\n shape = phi1.shape\n \n if phi2 is None:\n dist = np.abs(phi1).ravel()\n else:\n dist = np.abs(phi1-phi2).ravel()\n dist[dist>np.pi] = np.pi - dist[dist>np.pi]%np.pi\n return dist.reshape(shape)",
"def coherency_phase_spectrum(time_series, csd_method=None):\r\n if csd_method is None:\r\n csd_method = {'this_method': 'welch'} # The default\r\n\r\n f, fxy = get_spectra(time_series, csd_method)\r\n\r\n p = np.zeros((time_series.shape[0],\r\n time_series.shape[0],\r\n f.shape[0]))\r\n\r\n for i in range(time_series.shape[0]):\r\n for j in range(i + 1, time_series.shape[0]):\r\n p[i][j] = np.angle(fxy[i][j])\r\n p[j][i] = np.angle(fxy[i][j].conjugate())\r\n\r\n return f, p",
"def correlation(\n self,\n freq_1: float,\n time_1: float,\n freq_2: Optional[float] = None,\n time_2: Optional[float] = None,\n dw: Optional[tuple] = (1.0, 1.0),\n dagg: Optional[tuple] = (1, 0),\n interaction_picture: Optional[bool] = False,\n change_only: Optional[bool] = False,\n progress_type: Optional[Text] = None) -> complex:\n dt = self._process_tensor.dt\n if time_2 is None:\n time_2 = time_1\n if freq_2 is None:\n freq_2 = freq_1\n self.generate_system_correlations(time_2, progress_type)\n corr_mat_dim = int(np.round(time_2/dt))\n _sys_correlations = self._system_correlations[:corr_mat_dim,\n :corr_mat_dim]\n _sys_correlations = np.nan_to_num(_sys_correlations)\n re_kernel,im_kernel = self._calc_kernel(freq_1, time_1,\n freq_2, time_2, dagg)\n coup_1 = dw[0] * self._bath.correlations.spectral_density(freq_1)**0.5\n coup_2 = dw[1] * self._bath.correlations.spectral_density(freq_2)**0.5\n correlation = np.sum(_sys_correlations.real*re_kernel + \\\n 1j*_sys_correlations.imag*im_kernel) * \\\n coup_1 * coup_2\n if (not change_only) and (freq_1 == freq_2) \\\n and (dagg in ((1, 0), (0, 1))):\n if self._temp > 0:\n correlation += np.exp(-freq_1/self._temp) \\\n / (1 - np.exp(-freq_1/self._temp))\n if dagg == (0, 1):\n correlation += 1\n\n if not interaction_picture:\n correlation *= np.exp(1j * ((2*dagg[0] - 1) * freq_2 * time_2 + \\\n (2*dagg[1] - 1) * freq_1 * time_1))\n return correlation",
"def periodic_corr(x, y):\r\n return np.fft.ifft(np.fft.fft(x) * np.fft.fft(y).conj()).real",
"def cross_correlation(arr1, arr2):\n faxes = lambda x: tuple(np.arange(x.ndim - 1) + 1)\n\n return pipe(\n arr1,\n dafftn(axes=faxes(arr1)),\n lambda x: daconj(x) * dafftn(arr2, axes=faxes(arr2)),\n daifftn(axes=faxes(arr1)),\n dafftshift(axes=faxes(arr1)),\n lambda x: x.real / arr1[0].size,\n )",
"def crosscorr(datax, datay, lag=0):\n return datax.corr(datay.shift(lag))",
"def fringes_morlet_phase(m1,m2, quasi_pi=False):\n ### cross spectrum\n cross_spec = np.conj(m1.cwt)*m2.cwt\n phi = np.angle(cross_spec)\n if quasi_pi:\n phi = np.mod(phi + np.pi/2, 2*np.pi)\n weight = abs(m1.cwt)*abs(m2.cwt)\n phase = np.sum(phi*weight, axis=0)/np.sum(weight, axis=0)\n if quasi_pi:\n phase -= np.pi/2\n return phase",
"def _xcorrf(self, profile1, profile2, dx):\n corrf = np.correlate(profile2, profile1, mode = 'same') \\\n /np.sum(profile1**2)\n\n if np.isnan(corrf).any():\n displ = np.nan\n corr = 0\n else:\n displ = (np.where(corrf == np.max(corrf))[0][0] - len(corrf)//2)*dx\n corr = np.max(corrf)\n\n return displ, corr",
"def gen_samp_corput(self, i1, i2):\n dist = self.dist\n \n def corput_sequence ( i1, i2 ):\n \n n = abs ( i2 - i1 ) + 1\n r = np.zeros ( n )\n \n if ( i1 <= i2 ):\n i3 = +1\n else:\n i3 = -1\n \n j = 0\n \n for i in range ( i1, i2 + i3, i3 ):\n \n if ( i < 0 ):\n s = -1\n else:\n s = +1\n \n t = abs ( int ( i ) )\n \n base_inv = 0.5\n \n r[j] = 0.0\n \n while ( t != 0 ):\n d = ( t % 2 )\n r[j] = r[j] + d * base_inv\n base_inv = base_inv / 2.0\n t = ( t // 2 )\n \n r[j] = r[j] * s\n \n j = j + 1 \n \n return r \n \n corput_seq = corput_sequence(i1, i2)\n \n samp = dist.ppf(corput_seq)\n return(samp.reshape(1, -1))",
"def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]",
"def chisqdata_cphase_fft(Obsdata, Prior, fft_pad_frac=1):\n clphasearr = Obsdata.c_phases(mode=\"all\", count=\"min\")\n uv1 = np.hstack((clphasearr['u1'].reshape(-1,1), clphasearr['v1'].reshape(-1,1)))\n uv2 = np.hstack((clphasearr['u2'].reshape(-1,1), clphasearr['v2'].reshape(-1,1)))\n uv3 = np.hstack((clphasearr['u3'].reshape(-1,1), clphasearr['v3'].reshape(-1,1)))\n clphase = clphasearr['cphase']\n sigma = clphasearr['sigmacp']\n\n npad = fft_pad_frac * np.max((Prior.xdim, Prior.ydim))\n\n im_info = (Prior.xdim, Prior.ydim, npad, Prior.psize, Prior.pulse)\n\n A = (im_info, [uv1, uv2, uv3])\n\n return (clphase, sigma, A)",
"def crosscorr(datax, datay, lag=0):\n return datax.corr(datay.shift(lag))",
"def correlate(array1,array2):\r\n arrayout = np.conj(fft2(array1)) * fft2(array2)\r\n return ifft2(arrayout)",
"def phase_swap_operator(self, x1, x2):\r\n return np.fft.ifft(np.abs(np.fft.fft(x1))*np.angle(np.fft.fft(x2)))",
"def sig_corr(self, s1, s2, comp_length):\n\n # np.corrcoef returns an array of coefficients -\n # the simple 'R' value is at row 1, col 0\n return np.corrcoef(\n self.max_freq[s1:s1+comp_length],\n self.max_freq[s2:s2+comp_length])[1, 0]",
"def cross_correlation(vol1, vol2):\n var_1 = tf.reduce_sum(tf.square(vol1 - tf.reduce_mean(vol1)))\n var_2 = tf.reduce_sum(tf.square(vol2 - tf.reduce_mean(vol2)))\n cov_12 = tf.reduce_sum((vol2 - tf.reduce_mean(vol2)) * (vol1 - tf.reduce_mean(vol1)))\n score = cov_12 / tf.sqrt(var_1 * var_2 + 1e-5)\n score = -tf.cast(score, tf.float32)\n return score",
"def pearson_correlation_comparison(data, synth):\n mean_data = np.average(data)\n mean_synth = np.average(synth)\n cov_data_synth = np.sum((data-mean_data)*(synth-mean_synth))/len(data)\n PCC = cov_data_synth/(np.std(data)*np.std(synth)) # Pearson correlation coefficient (-1 to 1, where 0 is no correlation, -1 is anti-correlation and 1 is correlation.)\n if PCC<0.:\n PCC = 0.\n return PCC",
"def periodic_corr_np(x, y):\n\t#\n\t# src: https://stackoverflow.com/questions/28284257/circular-cross-correlation-python\n\t# circular cross correlation python\n\t#\n\t#\n\treturn np.correlate(x, np.hstack((y[1:], y)), mode='valid')",
"def icp_step(Points1,Points2):\r\n #get the correspondences\r\n S1,S2 = get_correspondences(Points1,Points2)\r\n\r\n # Center the resulting pairs substracting their means\r\n S1_shift, mean1 = subtract_mean(S1)\r\n S2_shift, mean2 = subtract_mean(S2)\r\n\r\n #calculate the error-minimizing rotation\r\n R = compute_error_minimizing_rotation(S1_shift,S2_shift)\r\n #find the t such that R*p+t = R*(p-mean2)+mean1\r\n Rmean2 = [R[0][0]*mean2[0]+R[0][1]*mean2[1],\r\n R[1][0]*mean2[0]+R[1][1]*mean2[1]]\r\n\r\n return R,[-(mean1[0]-Rmean2[0]),-(mean1[1]-Rmean2[1])]",
"def cross_correlation(x,y,time):\n import numpy as np\n modeC = \"same\"\n x = (x - np.mean(x))/np.std(x)\n y = (y - np.mean(y))/np.std(y)\n\n timeInt = np.diff(time).mean().days\n numPoints = len(x)\n fig = plt.figure(figsize=(6,3.5)) \n d = np.correlate(y,x,modeC)\n\n plt.plot([0,0],[-0.5,1],color=\"grey\")\n plt.xlabel(\"Lag\")\n plt.ylabel(\"Correlation\")\n plt.plot(np.linspace(len(x)/2*timeInt,-len(x)/2*timeInt,len(x)),d/numPoints)\n plt.show()",
"def PLV_Coh(X,Y,TW,fs):\n X = X.squeeze()\n ntaps = 2*TW - 1\n dpss = sp.signal.windows.dpss(X.size,TW,ntaps)\n N = int(2**np.ceil(np.log2(X.size)))\n f = np.arange(0,N)*fs/N\n PLV_taps = np.zeros([N,ntaps])\n Coh_taps = np.zeros([N,ntaps])\n Phase_taps = np.zeros([N,ntaps])\n for k in range(0,ntaps):\n print('tap:',k+1,'/',ntaps)\n Xf = sp.fft(X *dpss[k,:],axis=0,n=N)\n Yf = sp.fft(Y * dpss[k,:].reshape(dpss.shape[1],1),axis=0,n=N)\n XYf = Xf.reshape(Xf.shape[0],1) * Yf.conj()\n Phase_taps[:,k] = np.unwrap(np.angle(np.mean(XYf/abs(XYf),axis=1)))\n PLV_taps[:,k] = abs(np.mean(XYf / abs(XYf),axis=1))\n Coh_taps[:,k] = abs(np.mean(XYf,axis=1) / np.mean(abs(XYf),axis=1))\n \n PLV = PLV_taps.mean(axis=1)\n Coh = Coh_taps.mean(axis=1)\n Phase = Phase_taps.mean(axis=1)\n return PLV, Coh, f, Phase",
"def coherency_phase_delay(time_series, lb=0, ub=None, csd_method=None):\r\n if csd_method is None:\r\n csd_method = {'this_method': 'welch'} # The default\r\n\r\n f, fxy = get_spectra(time_series, csd_method)\r\n\r\n lb_idx, ub_idx = utils.get_bounds(f, lb, ub)\r\n\r\n if lb_idx == 0:\r\n lb_idx = 1\r\n\r\n p = np.zeros((time_series.shape[0], time_series.shape[0],\r\n f[lb_idx:ub_idx].shape[-1]))\r\n\r\n for i in range(time_series.shape[0]):\r\n for j in range(i, time_series.shape[0]):\r\n p[i][j] = _coherency_phase_delay(f[lb_idx:ub_idx],\r\n fxy[i][j][lb_idx:ub_idx])\r\n p[j][i] = _coherency_phase_delay(f[lb_idx:ub_idx],\r\n fxy[i][j][lb_idx:ub_idx].conjugate())\r\n\r\n return f[lb_idx:ub_idx], p",
"def crosscorrelation(x, y, nlags = 0):\n return [x.corr(y.shift(lag)) for lag in range(nlags + 1)]"
] | [
"0.66017336",
"0.6389433",
"0.61255634",
"0.6099715",
"0.6079657",
"0.6045117",
"0.59957165",
"0.5968298",
"0.5967114",
"0.59037614",
"0.5879406",
"0.5871049",
"0.5862538",
"0.585481",
"0.58367324",
"0.57887626",
"0.57637984",
"0.5761343",
"0.5755311",
"0.5738161",
"0.573262",
"0.572472",
"0.56960875",
"0.56874675",
"0.5659867",
"0.5658369",
"0.564643",
"0.56356466",
"0.5582213",
"0.5576323"
] | 0.69613063 | 0 |
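Note on the row above: `_xcorr_trace` delegates to `phasecorr.xcorr` with `mode='pcc'`, which is not shown here. As a self-contained sketch, a two-signal phase cross-correlation over a symmetric range of lags could be written as follows — again an assumption in the spirit of Schimmel's PCC, not the wrapped routine itself.

```python
import numpy as np
from scipy.signal import hilbert

def phase_xcorr(x, y, max_lag):
    """Phase cross-correlation of two equal-length 1-D signals (sketch only;
    not the phasecorr.xcorr routine wrapped by _xcorr_trace above)."""
    px = np.exp(1j * np.angle(hilbert(np.asarray(x, dtype=float))))
    py = np.exp(1j * np.angle(hilbert(np.asarray(y, dtype=float))))
    n = px.size
    lags = np.arange(-max_lag, max_lag + 1)
    pcc = np.empty(lags.size)
    for k, lag in enumerate(lags):
        if lag >= 0:
            a, b = px[lag:], py[: n - lag]
        else:
            a, b = px[: n + lag], py[-lag:]
        pcc[k] = np.sum(np.abs(a + b) - np.abs(a - b)) / (2 * a.size)
    return lags, pcc

# A pure time shift between y and x shows up as the lag of the PCC maximum.
t = np.linspace(0.0, 10.0, 2000)
x = np.sin(2 * np.pi * 1.5 * t)
y = np.roll(x, 25)
lags, pcc = phase_xcorr(x, y, 60)
print(lags[np.argmax(pcc)])   # close to -25 with this sign convention
```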
Calculate the phase autocorrelation (pac) of signal1. For this purpose, a time-shifted copy of signal1 is compared to the corresponding portion of signal1. | def _acorr_trace(signal1, **kwargs):
kwargs['mode'] = 'pac'
kwargs['lags'] = __default_lags_if_not_set(signal1, signal1, **kwargs)
pac_signal = phasecorr.acorr(signal1.data, **kwargs)
trace = _tr.Trace(data=pac_signal)
__writeheader(trace, signal1, **kwargs)
return trace | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def constract(phase, magnitude):\n new_spectrum = magnitude * np.exp(1j * phase)\n\n # reverse the shift and FFT\n f_ishift = np.fft.ifftshift(new_spectrum)\n img_back = np.fft.ifft2(f_ishift)\n \n return np.abs(img_back)",
"def test_lag1Cor_Estimation(self):\n P = PSignal.PSignal(np.arange(10))\n spectrum = PeriodogramEngine.FourierAnalyser(P)\n self.assertAlmostEqual(spectrum.__estimateLag1Cor__(), 0.777777778)",
"def autocorr(self, x):\n X = rfft(x, n=(x.shape[1]*2-1), axis=1)\n xr = irfft(X * X.conjugate(), axis=1).real\n xr = fftshift(xr, axes=1)\n xr = xr.sum(axis=1)\n return xr",
"def global_phase_correction(imgs_shifted_ft):\n nangles = imgs_shifted_ft.shape[0]\n phase_corrections = np.zeros((nangles))\n\n # todo: should weight by SNR, or something like this\n for ii in range(nangles):\n phase_corrections[ii] = np.angle(np.sum(imgs_shifted_ft[ii, 0] * imgs_shifted_ft[ii, 1].conj()))\n\n return phase_corrections",
"def EstimatedAutocorr(fw, data, pnum, trialnum, marker1, marker2): \n cycle_start = HeelStrike(fw, data, pnum, trialnum, marker1, marker2)\n x = cycle_start[2] \n time = cycle_start[1]\n drop_NA = np.vstack((x, time))\n #print drop_NA.shape, x.shape, y.shape\n drop_NA = drop_NA.T\n x = drop_NA[:,0]\n #x = x[~np.isnan(x).any()]\n \n #n = len(x)\n #var = np.var(x)\n tao = np.correlate(x, x, mode='full')\n # assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))\n #result = r/(var*(np.arange(n, 0, -1)))\n plt.figure(4)\n plt.plot(tao)\n return tao",
"def detect_phase_shift(self, curr_frame):\n curr_frame = np.float32(\n cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)) \n shift = cv2.phaseCorrelate(self.initial_frame, curr_frame) #get phase-shift \n return shift",
"def get_auto_corr(timeSeries1_pre,timeSeries2_pre,k):\n l=len(timeSeries1_pre)\n timeSeries1=timeSeries1_pre[0:l-k]\n timeSeries2=timeSeries2_pre[k:]\n timeSeries1_mean=timeSeries1.mean()\n timeSeries2_mean=timeSeries2.mean()\n ###doubt\n timeSeries1_std= np.sqrt(timeSeries1_pre.var()*len(timeSeries1_pre))\n timeSeries2_std= np.sqrt(timeSeries2_pre.var()*len(timeSeries2_pre))\n auto_corr = 0\n for i in xrange(l-k):\n if timeSeries1_std == 0 or timeSeries2_std == 0:\n return 0\n else:\n tmp=(timeSeries1[i]-timeSeries1_mean)*(timeSeries2[i]-timeSeries2_mean)/(timeSeries1_std*timeSeries2_std)\n auto_corr = auto_corr + tmp\n \n return auto_corr",
"def series_autocorr(series, lag=1):\n op = DataFrameCorr(other=series.shift(lag), method=\"pearson\")\n return op(series)",
"def autocorrelation(df,maxt,step,vari,acquisiton_time,division_time):\n maxt = int(maxt/acquisiton_time)\n step = int(step/acquisiton_time)\n df = connect_cells(df,vari)\n return np.vstack([correlation(df,Dt,vari) for Dt in\\\n np.arange(0,maxt,step)]),\\\n np.arange(0,maxt,step)*acquisiton_time/division_time",
"def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase",
"def step_autocorrelation(self):\n\n max_hops = max([len(x) for x in self.steps])\n\n self.acf = np.zeros([len(self.steps), max_hops])\n\n keep = [] # list to hold indices of trajectories with a non-zero amount of hops\n for i in range(len(self.steps)):\n hops = self.steps[i]\n if len(hops) > 1:\n self.acf[i, :len(self.steps[i])] = timeseries.acf(self.steps[i])\n keep.append(i)\n\n self.acf = self.acf[keep, :]\n\n self.acf = np.array([self.acf[np.nonzero(self.acf[:, i]), i].mean() for i in range(max_hops)])\n\n #self.acf = timeseries.step_autocorrelation(self.z_interpolated.T[..., np.newaxis])",
"def autocorrelate(x):\n\n global i1, i2\n # used for transposes\n t = roll(range(x.ndim), 1)\n\n # pairs of indexes\n # the first is for the autocorrelation array\n # the second is the shift\n ii = [list(enumerate(range(1, s - 1))) for s in x.shape]\n\n # initialize the resulting autocorrelation array\n acor = empty(shape=[len(s0) for s0 in ii])\n\n # iterate over all combinations of directional shifts\n for i in product(*ii):\n # extract the indexes for\n # the autocorrelation array\n # and original array respectively\n i1, i2 = asarray(i).T\n\n x1 = x.copy()\n x2 = x.copy()\n\n for i0 in i2:\n # clip the unshifted array at the end\n x1 = x1[:-i0]\n # and the shifted array at the beginning\n x2 = x2[i0:]\n\n # prepare to do the same for\n # the next axis\n x1 = x1.transpose(t)\n x2 = x2.transpose(t)\n\n # normalize shifted and unshifted arrays\n x1 -= x1.mean()\n x1 /= x1.std()\n x2 -= x2.mean()\n x2 /= x2.std()\n\n # compute the autocorrelation directly\n # from the definition\n acor[tuple(i1)] = (x1 * x2).mean()\n\n return acor",
"def test_cache_to_phase():\r\n ij = [(0, 1), (1, 0)]\r\n x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])\r\n y = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])\r\n ts = np.vstack([x, y])\r\n freqs, cache = tsa.cache_fft(ts, ij)\r\n ph = tsa.cache_to_phase(cache, ij)",
"def autocorr(sig):\n return float(np.correlate(sig, sig))",
"def step_autocorrelation(trajectories, axis=0):\n\n try:\n if len(axis) == 1:\n axis = axis[0]\n except TypeError:\n pass\n\n ntraj = trajectories.shape[1] # number of particles with a trajectory\n\n # calculate acf of first trajectory in order to determine size of output array. timeseries.acf will truncate\n # the array slightly in order to make the FFT efficient\n ACF = acf(trajectories[1:, 0, axis] - trajectories[:-1, 0, axis])\n acfs = np.zeros([ntraj, ACF.size])\n acfs[0, :] = ACF\n\n keep = []\n for t in range(1, ntraj):\n steps = trajectories[1:, t, axis] - trajectories[:-1, t, axis]\n if not np.all(steps == 0):\n acfs[t, :] = acf(steps)\n keep.append(t)\n #acfs[t, :] = acf(trajectories[:ACF.size, t, axis])\n\n return acfs[keep, :]",
"def autocorr_1d(x):\n x = jnp.atleast_1d(x)\n if len(x.shape) != 1:\n raise ValueError(\"invalid dimensions for 1D autocorrelation function\")\n n = next_pow_two(len(x))\n\n # Compute the FFT and then (from that) the auto-correlation function\n f = jnp.fft.fft(x - jnp.mean(x), n=2 * n)\n acf = jnp.fft.ifft(f * jnp.conjugate(f))[: len(x)].real\n acf /= acf[0]\n return acf",
"def calculate_overf_correlation(amp, index, f0, dt, n_lags):\n \n # Cast inputs as floats as I do a bunch of division.\n dt = float(dt)\n f0 = float(f0)\n index = float(index)\n # Number of points used in calculation needs to be at least 10 times bigger\n # than final number of point returned. This is so we are not affected by\n # the periodicity of the correlation function.\n buff_factor = 64\n n = buff_factor * n_lags\n n_return = n_lags\n # Generate the power spectrum.\n # Need to add a low frequency cut off, since there is an IR divergence.\n # Choose to cut off at 1/2df (so we get a bit of slope mode).\n power = overf_power_spectrum(amp, index, f0, dt, n,\n cut_off=1./n_lags/dt/2.0)\n # FFT it to the correlation function.\n corr = fft.ifft(power)\n # Complex part should be zero.\n corr = corr.real\n # In previous versions of this function, we shifted the output function.\n # however this screws up positive definiteness of the correlation matrix\n # and is unnecessary if you have the IR cut off.\n #corr -= corr[2 * n_return]\n # Trim to return size.\n corr = corr[:n_return]\n # To normalize, need to multiply by twice the bandwidth.\n corr *= 1.0/dt\n return corr",
"def correlate(array1,array2):\r\n arrayout = np.conj(fft2(array1)) * fft2(array2)\r\n return ifft2(arrayout)",
"def auto_correlation(values, lags=100):\n lags, corr, line, x = pl.acorr( values, maxlags=lags, usevlines=False, marker=None)\n return lags, corr",
"def phase_swap_operator(self, x1, x2):\r\n return np.fft.ifft(np.abs(np.fft.fft(x1))*np.angle(np.fft.fft(x2)))",
"def _xcorr_trace(signal1, signal2, **kwargs):\n\n kwargs['mode'] = 'pcc'\n kwargs['lags'] = __default_lags_if_not_set(signal1, signal2, **kwargs)\n\n pcc_signal = phasecorr.xcorr(signal1.data, signal2.data, **kwargs)\n\n trace = _tr.Trace(data=pcc_signal)\n __writeheader(trace, signal1, **kwargs)\n\n return trace",
"def autocorr(wave):\n lags = np.arange(len(wave.ys) // 2)\n corrs = [serial_corr(wave, lag) for lag in lags]\n return lags, corrs",
"def autocorrFFT(x):\n\n N = len(x)\n F = np.fft.fft(x, n=2*N) # 2*N because of zero-padding\n PSD = F * F.conjugate()\n res = np.fft.ifft(PSD)\n res = (res[:N]).real # now we have the autocorrelation in convention B\n n = N*np.ones(N) - np.arange(0, N) # divide res(m) by (N-m)\n\n return res / n # this is the autocorrelation in convention A",
"def autocorr(x):\n result = np.correlate(x, x, mode='full')/np.sum(x**2)\n return result[result.size//2:]",
"def _ac_fft3 (self,xp,max_lag):\n '''takes xp'''\n f = np.fft.fft(self.xp)\n p = np.array([np.real(v)**2+np.imag(v)**2 for v in f])\n pi = np.fft.ifft(p)\n corr = np.real(pi)[:self.n]/np.sum(self.xp**2)\n return corr[:max_lag]",
"def autocorrelation(x, nlags = 0):\n return [x.corr(x.shift(lag)) for lag in range(nlags + 1)]",
"def cache_to_relative_phase(cache, ij):\r\n # This is the way it is saved by cache_spectra:\r\n FFT_slices = cache['FFT_slices']\r\n FFT_conj_slices = cache['FFT_conj_slices']\r\n # norm_val = cache['norm_val']\r\n\r\n freqs = cache['FFT_slices'][ij[0][0]].shape[-1]\r\n\r\n ij_array = np.array(ij)\r\n\r\n channels_i = max(1, max(ij_array[:, 0]) + 1)\r\n channels_j = max(1, max(ij_array[:, 1]) + 1)\r\n #Pre-allocate for speed:\r\n Phi_xy = np.zeros((channels_i, channels_j, freqs), dtype=np.complex)\r\n\r\n #These checks take time, so do them up front, not in every iteration:\r\n if list(FFT_slices.items())[0][1].shape[0] > 1:\r\n if FFT_conj_slices:\r\n for i, j in ij:\r\n phi = np.angle(FFT_slices[i] * FFT_conj_slices[j])\r\n Phi_xy[i, j] = np.mean(phi, 0)\r\n\r\n else:\r\n for i, j in ij:\r\n phi = np.angle(FFT_slices[i] * np.conjugate(FFT_slices[j]))\r\n Phi_xy[i, j] = np.mean(phi, 0)\r\n\r\n else:\r\n if FFT_conj_slices:\r\n for i, j in ij:\r\n Phi_xy[i, j] = np.angle(FFT_slices[i] * FFT_conj_slices[j])\r\n\r\n else:\r\n for i, j in ij:\r\n Phi_xy[i, j] = np.angle(FFT_slices[i] *\r\n np.conjugate(FFT_slices[j]))\r\n\r\n return Phi_xy",
"def correlation_1d(data1, data2):\n\n N = len(data1)\n assert N == len(data2)\n n_fft = select_power_of_two(N)\n\n # Pad the signal with zeros to avoid the periodic images.\n R_data1 = np.zeros(2*n_fft)\n R_data1[:N] = data1\n R_data2 = np.zeros(2*n_fft)\n R_data2[:N] = data2\n F_data1 = np.fft.fft(R_data1)\n F_data2 = np.fft.fft(R_data2)\n result = np.fft.ifft(F_data1.conj()*F_data2)\n positive_time = result[:N].real/(N-np.arange(N))\n negative_time = result[-N+1:][::-1].real/(N-1-np.arange(N-1))\n\n return np.concatenate((negative_time[::-1], positive_time))",
"def _phase_detect(acc_z):\n acc_mag_sd = pd.Series(acc_z).rolling(100).std(center=True)\n min_sd = 1.5\n mov = np.where(acc_mag_sd >= min_sd)[0]\n phase = np.zeros(len(acc_z)).astype(int)\n phase[mov] = 1\n\n return phase",
"def phase_dist(phi1,phi2=None):\n shape = phi1.shape\n \n if phi2 is None:\n dist = np.abs(phi1).ravel()\n else:\n dist = np.abs(phi1-phi2).ravel()\n dist[dist>np.pi] = np.pi - dist[dist>np.pi]%np.pi\n return dist.reshape(shape)"
] | [
"0.59478575",
"0.5942706",
"0.58623695",
"0.58456415",
"0.58392256",
"0.5771846",
"0.57513154",
"0.57069325",
"0.5691568",
"0.569096",
"0.5653918",
"0.5615717",
"0.55893755",
"0.55863965",
"0.55757755",
"0.5560512",
"0.55482876",
"0.55477715",
"0.55407035",
"0.55402577",
"0.5525821",
"0.55245477",
"0.5494355",
"0.54578114",
"0.54495114",
"0.54448086",
"0.5443507",
"0.5429553",
"0.53821445",
"0.5366883"
] | 0.6709351 | 0 |
get random proxy from proxypool | def get_random_proxy():
url=requests.get(proxypool_url).text.strip()
#logger.info("now url is",url)
return url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_random(self):\n return random.choice(self.proxies)",
"def get_proxy_pool(self,proxy_pool,num):\n\n url='{url}/proxy/?num={num}'.format(url=config.SERVER_URL,num=num)\n\n try:\n res=request.urlopen(url,timeout=5).read()\n res=str(res,encoding='utf8')\n except:\n time.sleep(5)\n check_server() # sleep until server is available\n try:\n res=request.urlopen(url,timeout=5).read()\n res=str(res,encoding='utf8')\n except Exception as e:\n err_str='error: client -> get_proxy_pool : unable to ' \\\n 'connect to proxy server '\n info_manager(err_str,type='KEY')\n if config.KEY_INFO_PRINT:\n print(e)\n return\n\n if 'no valid proxy' in res: # if server return no valid proxy, means server\n # cannot provide proxy to this client\n err_str='error: client -> get_proxy_pool : fail to ' \\\n 'get proxy from server'\n info_manager(err_str,type='KEY')\n time.sleep(1)\n return\n\n try:\n data=res.split(';') # 'url,timedelay;url,timedelay;.....'\n data=[proxy_object(x) for x in data]\n except Exception as e:\n err_str='error: client -> get_proxy_pool : fail to ' \\\n 'parse proxy str info:\\r\\n'+res\n info_manager(err_str,type='KEY')\n return\n\n proxy_pool[:]=proxy_pool[:]+data",
"def get_random_proxy_ip():\n return requests.get(proxy_url).text.strip()",
"def _get_random_pool(pool_list):\n if not pool_list:\n return None\n if len(pool_list) == 1:\n return pool_list[0]\n\n last = len(pool_list) - 1\n index = random.randint(0, last)\n return pool_list[index]",
"def get_proxy(self):\n result = self.database.zrangebyscore(self.key, MAX_IP_SCORE, MAX_IP_SCORE)\n if len(result):\n return random.choice(result)\n else:\n #山穷水尽\n raise RiverEndError",
"def randomize_request_proxies(renewed_proxy=''):\n if renewed_proxy:\n random_number = renewed_proxy\n else:\n first_random_number = random.randrange(0, len(proxies))\n random_number = first_random_number\n\n print(random_number)\n proxy = proxies[random_number]\n\n return {'http': f'http://{config.PROXY_USERNAME}:{config.PROXY_PASS}@{proxy}',\n 'https': f'http://{config.PROXY_USERNAME}:{config.PROXY_PASS}@{proxy}'}",
"def get_proxy_address(self):\n proxies = self.get_to_use_proxies()\n\n if not proxies:\n return None\n\n quality_proxy_quantities = max(6, int(len(proxies) * 0.5))\n quality_proxy_quantities = min(quality_proxy_quantities, len(proxies))\n\n proxy = random.choice(proxies[0:quality_proxy_quantities])\n _logger.debug(\"Using %s proxy\", proxy[\"http\"])\n return copy.deepcopy(proxy)",
"def proxy_get(self, site_url):\n import urllib2\n res = None\n # Fail times limit\n fail = 0\n while fail < 5:\n # Load a random proxy\n proxy_count, random_proxy = self.proxyDb.random('HTTP')\n del proxy_count\n proxy_url = \"http://user:password@\"+random_proxy[0]['address']+':'+str(random_proxy[0]['port'])\n proxy_support = urllib2.ProxyHandler({'http':proxy_url})\n opener = urllib2.build_opener(proxy_support)\n urllib2.install_opener(opener)\n # Format a request\n request = urllib2.Request(site_url, headers=header)\n try:\n # Verify whether the proxy is effective\n if ipVerify:\n print 'trainHook.py: Info: IP address verification'\n print urllib2.urlopen('http://icanhazip.com', timeout=4).read()[0:-1]\n # Send request to web api\n res = urllib2.urlopen(request, timeout=5).read()\n # Handle errors\n except Exception as error:\n print 'trainHook.py: Error: Request error occurs'\n print error\n fail = fail + 1\n random_proxy[0]['fail_times'] = random_proxy[0]['fail_times'] + 1\n # write feedback to proxy database.\n finally:\n random_proxy[0]['connect_times'] = random_proxy[0]['connect_times'] + 1\n self.proxyDb.update_status(random_proxy[0]['proxy_id'],\n random_proxy[0]['connect_times'],\n random_proxy[0]['fail_times'])\n if res is not None:\n break\n return res",
"def proxy_scrape(self):\n print(\"Getting new live proxies\")\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr')[:20]:\n # if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')\n [0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n # return proxies\n # proxies=[]\n print(\"Obtained proxied are as : \", proxies)\n proxy_pool = cycle(proxies)\n proxy_list = [proxy for proxy in proxies]\n return proxy_pool, proxy_list",
"def recur_get_lst(self, random_proxy):\n idx = len(self.existed_url) - random.choice(range(1, 9))\n web_driver = self.create_driver(random_proxy=random_proxy, login=False)\n try:\n try:\n if web_driver.find_element_by_css_selector('.g-recaptcha').get_attribute('data-sitekey'):\n f = open('URL_crawled.txt', 'w', encoding='utf-8')\n for ix in self.existed_url:\n f.write(ix + \"\\n\")\n f.close()\n web_driver.quit()\n print('IP is blocked.')\n open('proxy_err.txt', 'a+').write(str(datetime.datetime.now()) + '\\t' + ','.join(random_proxy) + '\\n')\n return\n except:\n pass\n loop = 0\n while round(time.time() % 60) > 4 or loop < 5:\n while idx >= len(self.existed_url):\n idx -= random.choice(range(1, 4))\n if self.get_new_keywords(web_driver, goto=self.existed_url[idx]) == 0:\n idx -= 1\n loop += 1\n else:\n idx += 10\n # Write proxies when can't get elements 4 times\n if loop > 4:\n open('proxy_err.txt', 'a+').write(str(datetime.datetime.now()) + '\\t' + ','.join(random_proxy) + '\\n')\n\n except:\n f = open('URL_crawled.txt', 'w', encoding='utf-8')\n for ix in self.existed_url:\n f.write(ix + \"\\n\")\n f.close()",
"def get_active_proxies(max_proxy=20):\n proxies = get_free_proxies()\n\n pool = ThreadPool(50)\n active_proxies = pool.map(check_proxy, proxies)\n active_proxies = [x for x in active_proxies if x is not None]\n\n if not active_proxies:\n discord.send_message(\"No proxy to use\")\n raise Exception(\"No proxy to use\")\n\n return active_proxies[:max_proxy]",
"def proxies_pool(self):\n \n PROXY_URL = 'https://www.sslproxies.org/'\n\n # Retrieve the site's page. The 'with'(Python closure) is used here in order to automatically close the session\n # when done\n with requests.Session() as res:\n proxies_page = res.get(PROXY_URL)\n\n # Create a BeutifulSoup object and find the table element which consists of all proxies\n soup = BeautifulSoup(proxies_page.content, 'html.parser')\n proxies_table = soup.find(id='proxylisttable')\n\n # Go through all rows in the proxies table and store them in the right format (IP:port) in our proxies list\n proxies = []\n for row in proxies_table.tbody.find_all('tr'):\n proxies.append('{}:{}'.format(row.find_all('td')[utils['MAGIC_ZERO']].string, row.find_all('td')[MAGIC_ONE].string))\n return proxies",
"def pick_a_new_proxy_ip(self):\n\t\t\n\t\tdef __all_alt_ips(alt_ip):\n\t\t\tif alt_ip.startswith(\"[\") and alt_ip.endswith(\"]\"):\n\t\t\t\talt_ip = alt_ip[1:-1]\n\t\t\t\talt_ip_list = []\n\t\t\t\tfor chunk in alt_ip.split(\",\"):\n\t\t\t\t\tif '-' in chunk:\n\t\t\t\t\t\ta, b = chunk.split(\"-\")\n\t\t\t\t\t\talt_ip_list.extend(str(x) for x in xrange(int(a), int(b)+1))\n\t\t\t\t\telse:\n\t\t\t\t\t\talt_ip_list.append(chunk)\n\t\t\t\treturn alt_ip_list\n\t\t\telse:\n\t\t\t\treturn [ alt_ip ]\n\t\t\n\t\t# parse the self.proxy string\n\t\tif self.proxy is None:\n\t\t\tproxy, alt_ip = None, None\n\t\telif \"/\" in self.proxy:\n\t\t\tproxy, alt_ip_range = self.proxy.split(\"/\", 1)\n\t\t\talt_ip = random.choice(__all_alt_ips(alt_ip_range))\n\t\t\talt_ip = int(alt_ip) # 0 is default server - bool(0) == False\n\t\telse:\n\t\t\tproxy, alt_ip = self.proxy, None\n\t\t\n\t\tif proxy:\n\t\t\t# validate proxy name\n\t\t\tif '://' in proxy:\n\t\t\t\traise ValueError(\"Proxy value %r invalid (expected host[:port])\" % (proxy))\n\t\t\tif not ':' in proxy:\n\t\t\t\tproxy += \":3128\"\n\t\t\n\t\t# remember the current proxy string so we know if it's changed\n\t\tself.__current_proxy = self.proxy\n\t\t\n\t\t# set (proxy, alt_ip) as the proxy we want to use\n\t\tself.__use_this_proxy = (proxy, alt_ip)\n\t\t\n\t\t# run self.reset()\n\t\tself.reset()",
"def rand_ips(max_num=None):\n count = 0\n while max_num is None or count < max_num:\n if max_num is not None:\n count += 1\n yield random_ip()",
"def use_random_public_proxy(self, val=True, test_proxy=False):\n if not val:\n self.random_proxy_bag = False\n return False\n self.random_proxy_bag = True\n\n if not self.proxy_bag:\n self.logger.debug(\"Proxy Bag already built, not getting more.\")\n self.proxy_bag = self.get_public_proxies()\n\n self.reset_proxy_from_bag()\n if not test_proxy:\n return True\n\n if self.test_public_proxy():\n return True\n\n return False",
"def obtain(proxy):\n return pickle.loads(pickle.dumps(proxy))",
"def get(self, pages=pages):\n try:\n self.pool.map(self.proxyPage,pages)\n except urllib.error.HTTPError as e:\n self.run(e.geturl().split('/')[-1])\n return self.proxys",
"def return_proxy(self):\n\n check_server()\n url='{url}/proxy_return'.format(url=config.SERVER_URL)\n proxy_ret= [x.raw_data for x in self.proxy_pool]\n proxy_str=''\n\n for item in proxy_ret:\n proxy_str=proxy_str+item\n data={\n 'data':proxy_str\n }\n\n data=parse.urlencode(data).encode('utf-8')\n\n try:\n opener=request.build_opener()\n req=request.Request(url,data)\n res=opener.open(req).read().decode('utf-8')\n except:\n try:\n opener=request.build_opener()\n req=request.Request(url,data)\n res=opener.open(req).read().decode('utf-8')\n except:\n err_str='error:client->return_proxy:unable to ' \\\n 'connect to server'\n info_manager(err_str,type='KEY')\n return\n\n if 'return success' in res:\n print('Success: return proxy to server')\n return\n else:\n err_str='error:client->return_proxy:'+res\n info_manager(err_str,type='KEY')\n # raise ConnectionError('Unable to return proxy')\n return",
"def deal_proxies(thread_count, proxy_list):\n pool = Pool(thread_count)\n print('using {} threads to check {} proxies'.format(thread_count, len(proxy_list)))\n pool.map(deal_with_new_proxy, proxy_list)\n pool.close()\n pool.join()",
"def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people",
"def pick_random(self, count=1):\n # keeps only active servers\n active_servers = []\n for server in self.servers:\n if server.is_active():\n active_servers.append(server)\n # shuffle\n shuffle(active_servers)\n\n # return a random slice\n if self.count_active()>0:\n if count>1:\n return active_servers[:count]\n else:\n return active_servers[0]\n else:\n return active_servers",
"def get_proxy(self, proxy_name):\n\n proxies = self.proxies()\n if proxy_name in proxies:\n return proxies[proxy_name]\n else:\n return None",
"def get_random_db(self):\n rnd = random.random() * self.totals[-1]\n pool_index = bisect.bisect_right(self.totals, rnd)\n return list(self.pool)[pool_index]",
"def get_proxy():\n response = requests.get(\"http://127.0.0.1:5010/get/\")\n json_response = response.json()\n proxy = json_response.get(\"proxy\")\n return 'http://{}'.format(proxy)",
"def load_new_proxies(self):\n retries = self.conf['retry_times']\n while retries > 0 and len(self.proxies) == 0:\n time.sleep(self.conf['retry'])\n if self.conf['fast']:\n self.load_proxy_hidemyass()\n else:\n self.load_proxy_gimmeproxy()\n retries -= 1\n\n if not len(self.proxies) and self.conf['fast']:\n self.load_proxy_gimmeproxy()\n\n # Sort proxies\n self.proxies = sorted(self.proxies, key=lambda x: x['speed'], reverse=True)",
"def fake_image_pool(self, num_fakes, fake, fake_pool):\n if num_fakes < self._pool_size:\n fake_pool[num_fakes] = fake\n return fake\n else:\n p = random.random()\n if p > 0.5:\n random_id = random.randint(0, self._pool_size - 1)\n temp = fake_pool[random_id]\n fake_pool[random_id] = fake\n return temp\n else:\n return fake",
"def test_default_pool_generator(self):\n strategy_selection.generate_default_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST, use_generator=True)",
"def get(test_url, headless, tab_concurrency, browser_concurrency, limit, selector, source_num, geo, bin_path, chrome_args, debug):\n chrome_args = chrome_args.split(',')\n _args = []\n for arg in chrome_args:\n if len(arg) > 0:\n if not arg.startswith('--'):\n arg = '--{}'.format(arg)\n _args.append(arg)\n client = proxytools.Client(debug=True)\n results = client.get_proxies(test_url,\n headless=headless,\n tab_concurrency=tab_concurrency,\n browser_concurrency=browser_concurrency,\n limit=limit,\n selector=selector,\n source_num=source_num,\n bin_path=bin_path,\n chrome_args=chrome_args)\n if geo:\n wait = 1 # seconds between WHOIS request\n for result in results:\n proxy = proxytools.proxy.Proxy.from_string(result['proxy'])\n country = proxy.country()\n result['country'] = country\n time.sleep(wait)\n print(json.dumps(results, indent=4))",
"def getRandom(self) -> int:\n rand = random.randint(0, self.count - 1)\n worker = self.head\n while rand:\n worker = worker.next\n rand -= 1\n return worker.val",
"def _init_proxies(self):\n url = 'https://free-proxy-list.net/'\n log.debug('Init proxies: Getting proxy list from web...')\n try:\n soup = BeautifulSoup(get(url).text, \"html5lib\")\n proxies = list()\n for tr in soup.select('#proxylisttable > tbody > tr'):\n td = tr.select('td')\n if (td[4].text == 'elite proxy') & (td[6].text == 'yes'):\n proxies.append(':'.join([td[0].text, td[1].text]))\n return proxies\n except:\n log.exception('Failed to download proxy list.')\n raise"
] | [
"0.7766926",
"0.7258919",
"0.72230166",
"0.70320004",
"0.6830322",
"0.67573696",
"0.6513024",
"0.64002264",
"0.63306606",
"0.6161731",
"0.60935825",
"0.6068532",
"0.59971714",
"0.5855311",
"0.584551",
"0.58218956",
"0.57690114",
"0.574567",
"0.5730094",
"0.5697484",
"0.5691875",
"0.5670521",
"0.56639093",
"0.5643586",
"0.5626824",
"0.5593532",
"0.5576006",
"0.5575781",
"0.55563027",
"0.5555462"
] | 0.8399079 | 0 |
use proxy to crawl page | def crawl(url):
while True:
try:
proxy=get_random_proxy()
proxies = {'http': 'http://' + proxy}
logger.info(proxies)
            resp = requests.get(url, proxies=proxies, timeout=3)  # set the proxy and fetch each company's link
            resp.encoding = resp.apparent_encoding  # ensures the response is decoded correctly
if resp.status_code==200:
html = etree.HTML(resp.text)
logger.info("成功获得公司信息url!!!")
break
else:
continue
except:
logger.info("没获取到")
continue
return html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n if self.is_full():\n return\n for crawler in self.crawlers:\n logger.info(f'crawler {crawler} to get proxy')\n proxies = crawler.run()\n if proxies:\n for proxy in proxies:\n self.redis.add(proxy)\n logger.info(f'crawled {len(proxies)} proxies from {crawler}')\n else:\n logger.info(f'cannot crawl proxies from {crawler}')",
"def scrap_site(link):\n pass # Scrapy or BeautifulSoup",
"def get(self, pages=pages):\n try:\n self.pool.map(self.proxyPage,pages)\n except urllib.error.HTTPError as e:\n self.run(e.geturl().split('/')[-1])\n return self.proxys",
"def proxy_results(url: str, output_path: str) -> None :\n results = read_gobuster_output(url, output_path)\n LOG.info(\"Proxying found URLs in Burp\")\n for item in results:\n LOG.info(f\"Requesting: {item}\")\n try:\n _ = requests.get(item, proxies=PROXIES, verify=False)\n except Exception:\n continue",
"def proxy_get(self, site_url):\n import urllib2\n res = None\n # Fail times limit\n fail = 0\n while fail < 5:\n # Load a random proxy\n proxy_count, random_proxy = self.proxyDb.random('HTTP')\n del proxy_count\n proxy_url = \"http://user:password@\"+random_proxy[0]['address']+':'+str(random_proxy[0]['port'])\n proxy_support = urllib2.ProxyHandler({'http':proxy_url})\n opener = urllib2.build_opener(proxy_support)\n urllib2.install_opener(opener)\n # Format a request\n request = urllib2.Request(site_url, headers=header)\n try:\n # Verify whether the proxy is effective\n if ipVerify:\n print 'trainHook.py: Info: IP address verification'\n print urllib2.urlopen('http://icanhazip.com', timeout=4).read()[0:-1]\n # Send request to web api\n res = urllib2.urlopen(request, timeout=5).read()\n # Handle errors\n except Exception as error:\n print 'trainHook.py: Error: Request error occurs'\n print error\n fail = fail + 1\n random_proxy[0]['fail_times'] = random_proxy[0]['fail_times'] + 1\n # write feedback to proxy database.\n finally:\n random_proxy[0]['connect_times'] = random_proxy[0]['connect_times'] + 1\n self.proxyDb.update_status(random_proxy[0]['proxy_id'],\n random_proxy[0]['connect_times'],\n random_proxy[0]['fail_times'])\n if res is not None:\n break\n return res",
"def proxy_scrape(self):\n print(\"Getting new live proxies\")\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr')[:20]:\n # if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')\n [0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n # return proxies\n # proxies=[]\n print(\"Obtained proxied are as : \", proxies)\n proxy_pool = cycle(proxies)\n proxy_list = [proxy for proxy in proxies]\n return proxy_pool, proxy_list",
"def __init__( self, site, debug=False, encoding=None, guess_encoding=False, requests_before_reconnect=0, proxy_must_match=None, print_requests=True):\n\t\tobject.__init__(self)\n\t\tself.debug = debug\n\t\tself.encoding = encoding\n\t\tself.guess_encoding = guess_encoding\n\t\tself.proxy_must_match = proxy_must_match # regular expression\n\t\tself.__proxy = None\n\t\t\n\t\tself.add_referer = False\n\t\tself.redirect_automatically = True\n\t\t\n\t\tself.print_requests = print_requests\n\t\t\n\t\tif requests_before_reconnect > 0:\n\t\t\tself.requests_before_reconnect = requests_before_reconnect\n\t\t\tself.requests_count = 1\n\t\telse:\n\t\t\tself.requests_before_reconnect = -1\n\t\t\n\t\tself.headers = {\n\t\t\t\"User-Agent\" : \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)\",\n\t\t}\n\t\t\n\t\tself.https = None\n\t\tself.http = None\n\t\t\n\t\t# pick_a_new_proxy_ip needs to access self.site to create HTTPConnect object\n\t\t# then setup_browser_for_site needs to set up properly\n\t\tself.__site = site\n\t\tself.pick_a_new_proxy_ip()\n\t\tself.setup_browser_for_site(site)",
"def crawl(self, url):\n return None",
"def nanoHTTPScanProxy(URL):\n libxml2mod.xmlNanoHTTPScanProxy(URL)",
"def visit(self, max_depth = DEPTH, response_handler=record, html_rendering=False, no_expand=lambda url, doc: False):\n if self.depth >= max_depth:\n return\n if self.url.name in pool:\n return\n else:\n pool.add(self.url.name)\n \n print(f\"Requesting {self.url.name}...\")\n \n# host for relative href\n try:\n host = re.search(r\"(?:(?:https?:)?//)?([^/]+)\", self.url.name).group(1)\n except Exception:\n host = None\n\n# indicate if the request is successful\n flag = False\n site = None\n html = ''\n\n for req in self.url.request_string():\n if html_rendering:\n renderer.render(req, timeout=10)\n while not renderer.ready:\n time.sleep(1)\n html = renderer.html\n site = bs4.BeautifulSoup(html, 'html5lib')\n if html:\n flag = True\n else:\n try:\n # print(f\"Site: {req}\")\n r = requests.get(req, timeout = 5)\n if r.status_code != 200:\n print(f\"Warning: HTTP response for {req} is {r.status_code} but 200\")\n else:\n # print(\"OK\")\n flag = True\n html = r.content.decode('utf-8')\n site = bs4.BeautifulSoup(html, 'html5lib')\n break\n except requests.exceptions.Timeout:\n # print(f\"Request time out : {req}\")\n pass\n except Exception:\n # print(f\"Failed to connect : {req}\")\n pass\n\n if not site:\n return\n\n if not flag:\n return\n\n urls = []\n\n # handle the response\n response_handler(self.url.name, html)\n\n # find successors\n for tag in site.find_all('a'):\n urls.append(tag.get('href'))\n # print('Link to', tag.get('href'))\n \n if no_expand(self.url.name, html):\n # stop expanding\n return\n\n thread_pool = []\n for url in urls:\n if not url:\n continue\n # add host if started with a slash\n if url[0] == '/':\n if len(url) > 1 and url[1] == '/':\n url = url.lstrip('/')\n else:\n url = host + url\n url = url.rstrip('/')\n\n searchTask = URL(url)\n\n if not searchTask.valid:\n # print(f\"Invalid URL: {url}\")\n continue\n else:\n # if the website has been visited\n if searchTask.name in pool:\n continue\n else:\n thread = threading.Thread(target=Node(searchTask, self.depth + 1).visit, args=(max_depth, response_handler))\n thread.start()\n thread_pool.append(thread)\n\n while thread_pool:\n for thread in thread_pool:\n thread.join(timeout=0)\n if not thread.is_alive():\n thread_pool.remove(thread)\n time.sleep(1)",
"def test_proxy(mocker, proxy):\n get = mocker.patch(\"requests.get\", return_value=Mock(text=\"Foo\"))\n crawler = Crawler(proxies=[proxy] if proxy else None)\n\n url = 'http://foo.bar/'\n crawler.get(url)\n get.assert_called_once_with(url, proxies={'http': proxy})",
"def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)",
"def crawl(self):\n self.get('http://code.google.com/p/webscraping/')\n self.get('http://code.google.com/p/sitescraper/')\n QTimer.singleShot(5000, self.app.quit)",
"async def get_contents(self):\n if self._recon < 0:\n raise ValueError('Reconnection time needs to be positive!')\n urls = self._urls\n proxy_list = await self._pool.get_proxies(self._recon + 1)\n \n for count in range(self._recon + 1):\n proxy = proxy_list[count]\n if count > 0: # perform reconnection\n if not self._re_urls:\n print('No need to reconnect.')\n break\n else:\n if count == 1:\n print('Reconnecting...')\n print('\\n----------------------------------------------------------')\n print(ordinal[count].capitalize() + ' reconnection...\\n')\n urls = self._re_urls\n\n result_list = await self._connect(urls, proxy=proxy, which_site=True)\n\n self._re_urls.clear() # empty the reconnect urls list \n for result in result_list:\n url, soup, status, site = result\n if not self._error(url, soup, status, site, True):\n self._result += self._get_plain_text(url, soup, site)\n fail_num = len(self._re_urls)\n if count == self._recon:\n print('Failed to crawl ' + str(fail_num) + (' website.' if fail_num==1 else ' websites.'))\n\n self._result = re.sub(r'\\s+', '', self._result) # trim whitespaces\n self._result = self._rm_duplicate(self._result)",
"def proxy():\r\n document.add_heading('Proxy details', 1)\r\n proxy_metrics = ['customProperties','listenPort','restListenPort', 'allowHttp','unencryptedListenPort','authenticationListenPort',\r\n 'kerberosAuthentication', 'unencryptedAuthenticationListenPort', 'keepAliveTimeoutSeconds', 'maxHeaderSizeBytes',\r\n 'maxHeaderLines', 'hostName', 'logVerbosityAuditActivity', 'logVerbosityAuditSecurity', 'logVerbosityService',\r\n 'logVerbosityAudit', 'logVerbosityPerformance', 'logVerbositySecurity', 'logVerbositySystem','performanceLoggingInterval']\r\n\r\n proxynodes = get_qlik_sense.get_proxy()\r\n num_of_proxy = len(proxynodes)\r\n num_of_proxy_metrics = len(proxy_metrics)\r\n table = document.add_table(rows=num_of_proxy_metrics+1, cols=num_of_proxy+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_proxy):\r\n row.cells[item+1].text = proxynodes[item][20]\r\n\r\n for item in range(num_of_proxy_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(proxy_metrics[item])\r\n\r\n for proxynode in range(num_of_proxy):\r\n row.cells[proxynode+1].text = str(proxynodes[proxynode][item])",
"def _scrape(self):",
"def scrape(self):\n pass",
"def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)",
"def parse(self, response):\n content_type = self.get_content_type(response.headers)\n\n sitescan = response.meta.get('sitescan')\n\n if 'text/html' not in self.get_content_type(response.headers):\n\n # For linked content, find the urlscan it linked from\n urlscan = model.URLScan.objects.get(\n site_scan=sitescan,\n page_url_hash=sha256(response.meta['referrer']).hexdigest())\n else:\n # Only create urlscans for text/html\n urlscan, us_created = model.URLScan.objects.get_or_create(\n\n site_scan=sitescan,\n page_url_hash=sha256(response.url).hexdigest(),\n defaults={'page_url': response.url,\n 'timestamp': self.get_now_time()})\n\n # Continue crawling\n # Parse stylesheet links, scripts, and hyperlinks\n hxs = HtmlXPathSelector(response)\n\n # Extract other target links\n try:\n css_links = hxs.select('//link/@href').extract()\n except TypeError:\n css_links = []\n\n try:\n js_links = hxs.select('//script/@src').extract()\n except TypeError:\n js_links = []\n\n try:\n hyperlinks = hxs.select('//a/@href').extract()\n except TypeError:\n hyperlinks = []\n\n # Using a set removes duplicate links.\n all_links = set(hyperlinks + js_links + css_links)\n\n # Examine links, yield requests if they are valid\n for url in all_links:\n\n if not url.startswith('http://'):\n # ensure that links are to real sites\n if url.startswith('javascript:'):\n continue\n else:\n url = urljoin(response.url, url)\n\n ua = response.meta['user_agent']\n\n request = Request(url)\n request.headers.setdefault('User-Agent', ua.ua_string)\n request.meta['referrer'] = response.url\n request.meta['sitescan'] = sitescan\n request.meta['user_agent'] = ua\n request.meta['content_type'] = None\n\n yield request\n\n # The response contains a user agent, we should yield an item\n item = MarkupItem()\n item['content_type'] = self.get_content_type(response.headers)\n item['filename'] = os.path.basename(urlparse(response.url).path)\n item['headers'] = unicode(response.headers)\n item['meta'] = response.meta\n item['raw_content'] = response.body\n item['sitescan'] = sitescan\n item['urlscan'] = urlscan\n item['url'] = response.url\n item['user_agent'] = response.meta.get('user_agent')\n item['redirected_from'] = response.meta.get('redirected_from',\n u'')\n yield item",
"def set_proxy(self, http_proxy):\n self.http_proxy = http_proxy\n self._geturl.http_proxy = http_proxy",
"def _urlopen(url):\n headers = config.get(\"extra_headers\",{})\n headers['User-Agent'] = config.user_agent\n\n type, host, selector = split_type_host(url)\n\n if type.lower() == \"https\":\n conn = ProxyHTTPSConnection(host, url=url)\n else:\n conn = ProxyHTTPConnection(host, url=url)\n\n conn.request(\"GET\", selector, headers=headers)\n return conn.getresponse()",
"def request( self, method, location, parameters, headers, secure ):\n\t\tif self.__current_proxy != self.proxy:\n\t\t\tself.reset()\n\t\t\tprint \"proxy changed: %r\" % (self,)\n\t\t\n\t\tif self.proxy_must_match:\n\t\t\tif ( self.proxy is None ) or ( not self.proxy_must_match.search(self.proxy) ):\n\t\t\t\traise ValueError(\"Invalid proxy %r!!! Conflicts with proxy_must_match value!\" % (self.proxy,))\n\t\t\n\t\tif self.print_requests:\n\t\t\tprint \"%s %s %r %r\" % (secure and 'HTTPS' or 'HTTP', method, location, self.__use_this_proxy,)\n\t\t\n\t\tif self.requests_before_reconnect > 0:\n\t\t\tif self.requests_count > self.requests_before_reconnect:\n\t\t\t\t#open new connection\n\t\t\t\tself.requests_count = 1\n\t\t\t\tself.reset()\n\t\t\tself.requests_count += 1\n\n\t\tif secure:\n\t\t\tconn = self.https\n\t\telse:\n\t\t\tconn = self.http\n\n\t\tif self.debug:\n\t\t\tprint conn\n\n\t\tif headers and 'Referrer' in headers:\n\t\t\traise ValueError(\"Incorrect spelling - use referer not referrer\")\n\n\t\t# This strips out the :443 of https connections from the Host header by setting it manually.\n\t\tif not 'Host' in headers:\n\t\t\theaders['Host'] = self.site\n\t\t\n\t\ttry:\n\t\t\ttry:\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\texcept socket.error:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\texcept httplib.CannotSendRequest:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\t\n\t\t\ttry:\n\t\t\t\tresp = conn.getresponse()\n\t\t\texcept httplib.BadStatusLine:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\t\tresp = conn.getresponse()\n\t\t\texcept httplib.CannotSendRequest:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\t\tresp = conn.getresponse()\n\t\texcept Exception, e:\n\t\t\tprint \"Reset browser.py %r because error %r\" % (self, e,)\n\t\t\tself.reset()\n\t\t\traise\n\t\t\n\t\tcookie = resp.getheader( 'set-cookie' )\n\t\tif cookie:\n\t\t\tself.cookies.add( cookie )\n\t\t\n\t\tprotocol = 'http'\n\t\tif secure:\n\t\t\tprotocol = 'https'\n\t\tself.last_visited = '%s://%s%s' % (protocol, self.site, location)\n\t\t\n\t\t# if this is a redirect:\n\t\tif resp.status >= 300 and resp.status < 400:\n\t\t\t# check if the site was specified and it differs from\n\t\t\t# the current one\n\t\t\tconn.close()\n\t\t\tlocation = resp.getheader('location')\n\t\t\t#print \"redirecting to \", location\n\t\t\tparsed_location = urlparse.urlparse(location)\n\t\t\thttp_or_https = protocol\n\t\t\tcls = LocalRedirect\n\t\t\tif parsed_location[1]:\n\t\t\t\tif parsed_location[1] != self.site:\n\t\t\t\t\tcls = ExternalRedirect\n\t\t\t\telse:\n\t\t\t\t\t# ignore the beginning bit\n\t\t\t\t\thttp_or_https = parsed_location[0]\n\t\t\t\t\tparsed_location = list(parsed_location)\n\t\t\t\t\tparsed_location[0] = ''\n\t\t\t\t\tparsed_location[1] = ''\n\t\t\t\t\tlocation = urlparse.urlunparse(parsed_location)\n\t\t\t# raise an exception for the redirection\n\t\t\traise cls(location, resp.status, resp.reason, resp, http_or_https)\n\t\t\n\t\t# set the location that was visited, in case it differs from that which\n\t\t# was specified (i.e because of a redirect)\n\t\tresp.location = location\n\t\treturn resp",
"async def _fetch(self, session, url, proxy=None, raw=False, which_site=False):\n print(url)\n result = None\n site = None\n if 'hare' in url: # {'Unknown': -1, 'Pixnet': 0, 'Hares': 1}\n site = self._websites['Hares']\n elif 'pixnet' in url:\n site = self._websites['Pixnet']\n else:\n site = self._websites['Unknown']\n\n count = 1\n while count <= 2:\n soup = ''\n status = 0\n try:\n async with session.get(url, proxy=proxy) as response:\n source_code = await response.text('utf-8')\n status = response.status\n soup = source_code if raw else BeautifulSoup(source_code, 'lxml')\n except Exception as e:\n print('Connection error: ' + str(e))\n soup = None\n finally:\n result = (url, soup, status, site) if which_site else (url, soup, status)\n if status != 0:\n return result\n if 'searcharticle' not in url:\n count += 1\n result = (url, soup, status, site) if which_site else (url, soup, status)\n return result",
"def get_random_proxy():\n url=requests.get(proxypool_url).text.strip()\n #logger.info(\"now url is\",url)\n return url",
"def _get_url_wrapper(self, url, proxies=None):\n self.request_thread = threading.Thread(target=self._get_url,\n kwargs={'url': url, 'proxies': proxies}, daemon=True)\n self.request_thread.start()\n t_start = time.time()\n t_diff = 0\n while self.request_thread.is_alive() and t_diff < 10:\n time.sleep(0.5)\n t_diff = time.time() - t_start\n # logging.info('Timeout running...')\n if t_diff >= 10:\n logging.info('### RAN INTO TIMEOUT ###')\n return False\n logging.info('Nominal execution')\n return self.html_response",
"def load_proxy_gimmeproxy(self):\n try:\n proxy = urllib2.urlopen('http://gimmeproxy.com/api/getProxy?get=true&supportsHttps=true&maxCheckPeriod=3600').read()\n self.proxies = [{'http': json.loads(proxy)['curl'], 'speed': 50}]\n\n log.msg('Loaded new proxy: {} with speed 50%'.format(self.proxies[0]['http']))\n except urllib2.HTTPError, e:\n log.msg('Proxy does not loaded: {}'.format(e.message))",
"def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")",
"def _get_url(self, url, proxies=None):\n scraper = cloudscraper.create_scraper()\n try:\n html_rsp = scraper.get(url, proxies=proxies).text\n if html_rsp is None:\n logging.info('Error in SBScraper._get_url with url %s and proxy %s.', url, proxies)\n logging.info('Web response had NoneType.')\n self.html_response = False\n return\n self.html_response = html_rsp\n return\n # General exception as there are lots of errors with cloudflare. Every exception is handled via return values.\n except Exception as err: # pylint: disable=broad-except\n logging.info('Error in SBScraper._get_url with url %s and proxy %s.', url, proxies)\n logging.info('Error message was: %s', err)\n self.html_response = False\n return",
"def _init_proxies(self):\n url = 'https://free-proxy-list.net/'\n log.debug('Init proxies: Getting proxy list from web...')\n try:\n soup = BeautifulSoup(get(url).text, \"html5lib\")\n proxies = list()\n for tr in soup.select('#proxylisttable > tbody > tr'):\n td = tr.select('td')\n if (td[4].text == 'elite proxy') & (td[6].text == 'yes'):\n proxies.append(':'.join([td[0].text, td[1].text]))\n return proxies\n except:\n log.exception('Failed to download proxy list.')\n raise",
"def parse(self, html):\n \n result =json.loads(html)\n if result['code'] != 0:\n return\n MAX_PAGE = int(result['data']['last_page'])\n hosts_ports = result['data']['data']\n for ip_address in hosts_ports:\n if(ip_address):\n host = ip_address['ip']\n port = ip_address['port']\n yield Proxy(host=host, port=port)"
] | [
"0.6656336",
"0.6540788",
"0.6506942",
"0.65062267",
"0.6483475",
"0.6374295",
"0.6259475",
"0.6233891",
"0.6208135",
"0.61101043",
"0.609967",
"0.60954505",
"0.6091064",
"0.605424",
"0.6050282",
"0.60383075",
"0.6023799",
"0.59682155",
"0.5963611",
"0.59390926",
"0.59319854",
"0.5920252",
"0.59119695",
"0.58882374",
"0.58712757",
"0.586388",
"0.5853043",
"0.5809468",
"0.58062553",
"0.5779376"
] | 0.69973946 | 0 |
Checks if the given position is empty ("-") in the board. | def _position_is_empty_in_board(position, board):
return board[position[0]][position[1]] == "-" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_empty_space(board, position1):\n return board[position1] == \" \"",
"def emptyAt(self, position):\n\n #check for any sprites at the position\n for key in self.sprites:\n s = self.sprites[key]\n if s.position == position and s.visible: #not visible means it isn't taking up the tile\n return False\n\n #check whether the position is reserved \n for pos in self.reservedPositions:\n if pos == position:\n return False\n\n #if nothing found, it must be empty \n return True",
"def is_empty(self, row, column):\n\n return self.board[row][column] == placeholder",
"def check_for_empty(self):\n return ' ' in self.game_board",
"def board_is_empty():\n if STATE[-1].strip() == '-' * 7:\n return True\n else:\n return False",
"def is_tile_empty(self, y_pos, x_pos):\n if 15 > y_pos >= 0 and 0 <= x_pos < 15:\n return self.map[y_pos][x_pos] == ' '\n return False",
"def is_empty(self, x, y):\n if x in range(self.nx) and y in range(self.ny):\n if self.grid[y][x] == ' ':\n return True\n return False",
"def _empty_cell(self, i_row, i_col):\n return self._board[i_row][i_col] == \" \"",
"def is_empty(self, row, col):\n return self._cells[row][col] != FULL",
"def check_empty_space(self, row, column):\n return self.board[row][column] == ' '",
"def is_board_full(board):\n for i in range(1, 10):\n if check_empty_space(board, i):\n return False\n return True",
"def check_if_empty(self, current_board, user_input):\r\n if current_board[user_input] != \"X\" and current_board[user_input] != \"O\":\r\n return True\r\n else:\r\n return False",
"def is_empty(self, row, col):\n return self.field[row, col] == '-'",
"def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True",
"def space_check(board, position):\n return board[position] == \" \"",
"def checkFull(self, board):\n full = True\n for i in board:\n if i == ' ': full = False\n return full",
"def checkEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return True\n return False",
"def is_full(self):\n full = True\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\":\n full = False\n return full",
"def check_if_board_full(self, board):\n for i in range(self.height // 80):\n for j in range(self.width // 80):\n if board[(j, i)] == 0:\n return False\n elif j == self.width // 80:\n break\n else:\n pass\n print(\"Board full! :(\")\n return True",
"def _area_is_empty(self, screen: Screen, write_position: WritePosition) -> bool:\n wp = write_position\n\n for y in range(wp.ypos, wp.ypos + wp.height):\n if y in screen.data_buffer:\n row = screen.data_buffer[y]\n\n for x in range(wp.xpos, wp.xpos + wp.width):\n c = row[x]\n if c.char != \" \":\n return False\n\n return True",
"def LegalMove(self, pos):\n\n return (0 <= pos <= BOARD_SIZE) and (self.state[pos] == EMPTY)",
"def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY",
"def check_position_free(self, pos=None):\n if pos is None:\n pos = self.draw.position\n return self.board.board[pos] == 0",
"def full_board_check(board):\n is_full = True\n for i in range(1, 10):\n if str(board[i]).strip() == \"\":\n is_full = False\n break\n return is_full",
"def is_board_full(board):\n return not any(0 in val for val in board)",
"def is_position_availible(self, position):\n return self.positions[position] == ' '",
"def is_empty(self) -> bool:",
"def check_full_board(self): #rows then columns\n for row in self.board:\n for column_of_row in row:\n if column_of_row == ' ':\n return False\n return True",
"def available(self, position):\n if position is not None:\n x, y = position\n return self.grid[x][y] == 0",
"def _is_empty(self):\n if self.allocated_spaces == 0:\n return True\n else:\n return False"
] | [
"0.79477996",
"0.788795",
"0.7743375",
"0.7719691",
"0.76447976",
"0.74391013",
"0.74310374",
"0.7350126",
"0.7295014",
"0.71880174",
"0.71601164",
"0.7151122",
"0.7118377",
"0.7117768",
"0.7107725",
"0.71016866",
"0.7068392",
"0.7039862",
"0.6971793",
"0.6948898",
"0.69424504",
"0.69294935",
"0.69126385",
"0.6909269",
"0.6892488",
"0.6876325",
"0.6871225",
"0.6849236",
"0.6832453",
"0.6810654"
] | 0.89006466 | 0 |
Checks if the given position is valid. To be considered valid, a position must be a two-element tuple containing values from 0 to 2. | def _position_is_valid(position):
# Make sure that...
# position is a tuple
# position's length is 2
# every value in the tuple is an int
# every int in the tuple is either 0, 1 or 2
# if not, return False
if not isinstance(position, tuple) \
or len(position) != 2 \
or not all(isinstance(x, int) for x in position) \
or any(x for x in position if not 0 <= x <= 2):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_valid_position_tuple(pos):\n try: chrom, start_pos, end_pos, strand = pos\n except (TypeError, ValueError): raise MutantError(\"Didn't get a correct position tuple! %s\"%pos)\n if strand not in SEQ_STRANDS: raise MutantError(\"Invalid strand %s!\"%strand)\n if start_pos < 1: raise MutantError(\"Sequence positions must be positive!\")\n if start_pos > end_pos: raise MutantError(\"Sequence start can't be after end!\")",
"def validate_position(position: Tuple[int, int], bound: int) -> bool:\n if position[0] < 0 or position[0] >= bound:\n return False\n if position[1] < 0 or position[1] >= bound:\n return False\n return True",
"def _is_valid_position(self, position):\n if type(position) is not int:\n raise TypeError\n\n if position > 9 or position < 1:\n raise ValueError\n\n #confirm position is open\n try:\n int(self.grid[position - 1])\n except ValueError:\n return False\n\n return True",
"def is_pos_valid(self, pos):\n if pos is None:\n pos = (0, 0)\n assert isinstance(pos, tuple)\n\n if self.grid_map[pos[0], pos[1]] in [self.empty_value, 0.7]:\n return True\n else:\n return False",
"def move_is_valid(self, pos):\n\n if (not isinstance(pos, tuple) or len(pos) != 2 or \n not isinstance(pos[0], int) or not isinstance(pos[1], int)):\n return False\n y, x = pos\n if (y >= 0 and y < self.size and x >= 0 and x < self.size and \n self.board[pos] == HexBoard.EMPTY):\n return True\n else:\n return False",
"def position(self, value):\n if (not isinstance(value, tuple) or\n len(value) != 2 or\n not all(isinstance(num, int) for num in value) or\n not all(num >= 0 for num in value)):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value",
"def position(self, position):\n if type(position) is not tuple or len(position) is not 2\\\n or type(position[0]) is not int or position[0] < 0\\\n or type(position[1]) is not int or position[1] < 0:\n\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n self.__position = position",
"def __is_valid(self, pos):\n return 0 <= pos[0] < self._n and 0 <= pos[1] < self._n",
"def position(self, value):\n if type(value) is not tuple or len(value) != 2 or \\\n type(value[0]) is not int or value[0] < 0 or \\\n type(value[1]) is not int or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value",
"def position(self, value):\n if type(value) is not tuple or len(value) != 2 \\\n or type(value[0]) is not int or type(value[1]) is not int \\\n or value[0] < 0 or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value",
"def __is_valid_position(self, position):\n return (position[0] >= 0\n and position[0] < self.config.arena_size[0]\n and position[1] >= 0\n and position[1] < self.config.arena_size[1]\n and self.arena[position[0]][position[1]] != Tile.TAIL)",
"def valid_position(self, new_coords: tuple) -> bool:\n x, y = new_coords\n min_allowed_value = self.offset\n max_allowed_value = 10 - self.offset\n\n # If the value is outside of the board on the left or up, return false\n if x < min_allowed_value or y < min_allowed_value:\n return False\n # If the value is outside of the board on the right or down sides, return false\n if x > max_allowed_value or y > max_allowed_value:\n return False\n\n # If the position is taken by any piece, return false\n if self.board[y][x] != 0:\n print(\"Error: Position taken by %d\" % self.board[y][x])\n return False\n return True",
"def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True",
"def isPositionValid(self, x, y):\n if x >= self._width:\n return False\n if y >= self._height:\n return False\n if x < 0:\n return False\n if y < 0:\n return False\n return not (x, y) in self._invalidPositions",
"def _position_validity_checker(position, start, n_elements):\n _assert_shape(position, (MaxDimension.value(), n_elements + 1), \"position\")\n\n # Check if the start position of the rod and first entry of position array are the same\n assert_allclose(\n position[..., 0],\n start,\n atol=Tolerance.atol(),\n err_msg=str(\n \"First entry of position\" + \" (\" + str(position[..., 0]) + \" ) \"\n \" is different than start \" + \" (\" + str(start) + \" ) \"\n ),\n )",
"def validatePosition(boardsize, pos):\n return pos.x in range(0, boardsize) and pos.y in range(0,boardsize)",
"def is_pos_valid(pos, shape):\n x, y = pos\n is_valid = x >= 0 and x < shape[0] and y >= 0 and y < shape[1]\n return is_valid",
"def errorChecking(self, position):\n # regex check input to be a valid number\n if not re.match(\"[0-9]+\", position):\n print 'invalid input, please input a number [1-20]'\n return False\n if int(position) >= 1 and int(position) <= 20:\n # check position in table taken or not\n for subLs in self.table.values(): \n if int(position) in subLs:\n return True\n # check position in discardLs taken or not\n if int(position) in self.discardLs:\n return True\n print 'position not empty, already taken, please input another position'\n return False\n else:\n print 'Input out of range!'\n return False",
"def validate_pos(game: TowerDefenceSolver, position: Tuple[int, int], purchases_list: Purchases) -> bool:\n if (\n position[0] < 0\n or position[1] < 0\n or position[0] >= game.map_height\n or position[1] >= game.map_width\n or position in game.path\n ):\n return False\n\n for purchase in purchases_list:\n if purchase[\"coords\"] == position:\n return False\n\n return True",
"def validPosition(self, layer, row, column):\n if self.get(layer, row, column) != None:\n raise game.InvalidMoveException('The position ({}) is not free'.format([layer, row, column]))\n\n if layer > 0:\n if (\n self.get(layer - 1, row, column) is None or\n self.get(layer - 1, row + 1, column) is None or\n self.get(layer - 1, row + 1, column + 1) is None or\n self.get(layer - 1, row, column + 1) is None\n ):\n raise game.InvalidMoveException('The position ({}) is not stable'.format([layer, row, column]))",
"def is_valid(self, num, position):\n\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n \n # Check row for other numbers\n for i in range(num_cols):\n if self.board[position[0]][i] == num and position[1] != i:\n return False\n\n # Check column for other numbers\n for i in range(num_rows):\n if self.board[i][position[1]] == num and position[0] != i:\n return False\n \n # Check 3z3 subsquare\n box_x = position[1] // 3\n box_y = position[0] // 3\n\n for i in range(box_y * 3, box_y*3 + 3):\n for j in range(box_x * 3, box_x*3 + 3):\n if self.board[i][j] == num and (i, j) != position:\n return False\n \n return True",
"def validate_coordinates_input(points: tuple) -> None:\n\n for coordinate in points:\n if not isinstance(coordinate, tuple):\n raise InvalidGroundValueError(\n f\"Object must be a tuple\"\n f\" with format like (1, 2), not {coordinate}\"\n )",
"def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col",
"def is_valid(gr, pos, num):\n \n row = pos[0]\n col = pos[1]\n \n for i in range(0, 9):\n # test row\n if(i != col and gr[row][i] == num):\n return False\n # test col\n if(i != row and gr[i][col] == num):\n return False\n\n # test 3x3 square\n small_row = floor(row / 3) * 3\n small_col = floor(col / 3) * 3\n\n for i in range(small_row, small_row + 3):\n for j in range(small_col, small_col + 3):\n if((i != row and j != col) and gr[i][j] == num):\n return False\n \n return True",
"def _check_position(\n position: Union[str, Tuple[int, int]],\n textWidth,\n textHeight,\n window_size: Tuple[int, int],\n window_center: Tuple[int, int],\n ) -> Tuple[int, int]:\n _check_type(position, (str, tuple), \"position\")\n if isinstance(position, str):\n position = position.lower().strip()\n assert position in [\"centered\", \"center\"]\n position = (\n window_center[0] - textWidth // 2,\n window_center[1] + textHeight // 2,\n )\n for pos in position:\n _check_type(pos, (\"int\",))\n assert len(position) == 2\n assert 0 <= position[0]\n assert position[0] + textWidth <= window_size[0]\n assert 0 <= position[1] - textHeight\n assert position[1] <= window_size[1]\n return position",
"def check_for_validity_puzzle_2(pos: tuple, char: str, password: str):\n\n valid_pos, invalid_pos = pos\n # using xor\n if (password[valid_pos-1] == char) ^ (password[invalid_pos-1] == char):\n return True\n else:\n return False",
"def is_valid(self, board, position, value) -> bool:\n\n row_loc = re.findall(r'\\w+', position)[0][0] # Alphabet\n col_loc = re.findall(r'\\w+', position)[0][1] # Number\n\n for i in range(0, 9):\n if (board[row_loc + str(i+1)] == value) or (board[self.row_map[i+1] + col_loc] == value):\n return False\n\n r_grp, c_grp = [], []\n for i in range(3):\n if row_loc in self.row_group[i]:\n r_grp = (self.row_group[i])\n if int(col_loc) in self.col_group[i]:\n c_grp = (self.col_group[i])\n\n constraint = set([self.sudoku_board[r + str(c)] for r in r_grp for c in c_grp])\n if value in constraint:\n return False\n return True",
"def isValidCoord(coord, size):\n return coord[0] >= 0 and coord[0] < size and \\\n coord[1] >= 0 and coord[1] < size",
"def check_position_exists(self, pos=None):\n if pos is None:\n pos = self.draw.position\n return (pos[0] in self.range_of_valid_coordinates) and (pos[1] in self.range_of_valid_coordinates)",
"def is_position(position):\n return isinstance(position, str) and len(position) == 2 and POS_PATTERN.match(position)"
] | [
"0.76978976",
"0.7626505",
"0.7124657",
"0.70859355",
"0.6977404",
"0.69307876",
"0.685049",
"0.6840817",
"0.67205507",
"0.6672402",
"0.66398543",
"0.66194904",
"0.6600491",
"0.6599175",
"0.6573275",
"0.65055704",
"0.6466658",
"0.6444392",
"0.6436828",
"0.63908213",
"0.63521653",
"0.6341428",
"0.62906563",
"0.6191374",
"0.6177991",
"0.61652935",
"0.6120559",
"0.6115755",
"0.6110388",
"0.6092626"
] | 0.84633476 | 0 |
Checks if all 3 positions in given combination are occupied by given player. | def _is_winning_combination(board, combination, player):
"""
### Code before refactoring into a comprehension list:
for a_tuple in combination:
# e.g. a_tuple = (0,0)
# if board[0][0] != "X"
if board[a_tuple[0]][a_tuple[1]] != player:
return False
"""
if any(a_tuple for a_tuple in combination if board[a_tuple[0]][a_tuple[1]] != player):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_winning_combinations(board, player):\n winning_combinations = (\n ((0, 0), (0, 1), (0, 2)),\n ((1, 0), (1, 1), (1, 2)),\n ((2, 0), (2, 1), (2, 2)),\n ((0, 0), (1, 0), (2, 0)),\n ((0, 1), (1, 1), (2, 1)),\n ((0, 2), (1, 2), (2, 2)),\n ((0, 0), (1, 1), (2, 2)),\n ((0, 2), (1, 1), (2, 0))\n )\n\n if any(combination for combination in winning_combinations if _is_winning_combination(board, combination, player)):\n return player\n\n return None",
"def is_in_check(self, player):\n # List of coords in board\n col = ['a','b','c','d','e','f','g','h','i'] # the columns\n a = []\n for i in range(10):\n a.append([j + str(i+1) for j in col])\n \n # Flatten the list\n board_coords = []\n for sublist in a:\n for coord in sublist:\n board_coords.append(coord)\n \n # getting each object in the board for a player\n pieces_coords = []\n pieces_left = []\n for row in range(10):\n for column in range(9):\n if self.get_board()[row][column] is not None and self.get_board()[row][column].get_color() == player.upper():\n # pieces left on the board for the player\n pieces_coords.append((row, column))\n pieces_left.append(self.get_board()[row][column])\n \n p_b_coord = (pieces_coords, board_coords)\n \n counter = 0 \n for piece_coord in pieces_coords: \n for board_coord in board_coords: \n translated_index = self.column_to_letter(piece_coord[1]) + str(piece_coord[0]) \n piece = self.get_piece_type(translated_index)\n if piece is not None:\n if piece.check_legal(translated_index, board_coord, self.get_board(), self.get_game_state()) == True:\n counter += 1\n print(counter)\n if counter == 0:\n self._current_state = upper(player) + '_WON'\n return True \n return False",
"def check_win(self, player):\n for win_pos in TicTacToe.win_pos:\n # for each winning position defined we take the set difference to the positions played be player\n # if there are not elements left after resulting set after difference operator\n # we get False as return. ie he has placed his marker in the winning positions which in turn makes him\n # the winner\n if not win_pos.difference(self.player_played_pos[player]):\n return True\n\n # if after checking for every winning positions if the control still reaches here,\n # the player has not marked the winning positions. returns False\n return False",
"def check_positions(d, positions, player):\n contents = [d[y][x] for x, y in positions]\n contents = ''.join(contents) # e.g. 'XXO.'\n if contents == player * 4:\n return True",
"def check_if_user_won(self, board, pos, player_no):\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_horizontal(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_vertical(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_diagonal(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_inverted_diagonal(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n if self.check_if_board_full(board):\n self.draw = True\n return True",
"def _check_winner_3d(self, board, action, height, player=None):\n slices = []\n slices.append(board[action[0], :, :])\n slices.append(board[:, action[1], :])\n slices.append(board[:, :, height])\n # todo: stack with a loop for Score N. Also, these slices don't have to be checked all the time, maybe add some if-conditions\n slices.append(np.stack((board[0, 0, :], board[1, 1, :], board[2, 2, :], board[3, 3, :]), axis=0))\n slices.append(np.stack((board[0, 3, :], board[1, 2, :], board[2, 1, :], board[3, 0, :]), axis=0))\n\n temp = 0\n for slice in slices:\n temp = self.check_combo(slice, player)\n if temp != 0:\n break\n winner = temp\n\n #game_over = winner != 0 or len(np.argwhere(self.board).reshape(-1, )) == 0\n return winner",
"def check_combo(self, matrix, player=None):\n if player is None:\n player = self.current_player\n \n if self.N * player in np.sum(matrix, axis=0):\n return player\n if self.N * player in np.sum(matrix, axis=1):\n return player\n if np.sum(matrix.diagonal()) == self.N * player:\n return player\n if np.sum(np.fliplr(matrix).diagonal()) == self.N * player:\n return player\n return 0",
"def check_square(self, player_positions):\n\t\tprint \"Inside check square!-----\"\n\t\tlength = len(player_positions)\n\t\tsides = []\n\n\t\tfor item in combinations(player_positions,4):\n\t\t\t\"\"\"\n\t\t\t\tpopulating the sides using three points of the combination\n\t\t\t\"\"\"\n\t\t\tsides = []\n\t\t\tsides.append(self.distance(item[0],item[1]));\n\t\t\tsides.append(self.distance(item[0],item[2]));\n\t\t\tsides.append(self.distance(item[0],item[3]));\n\t\t\tresult = self.isSquare(sides,item)\n\t\t\tif(result == True):\n\t\t\t\treturn result\n\t\treturn False",
"def check_win(self, player):\n def check_row_win(player):\n for row in self.game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_column_win(player):\n # For doing a column check, transpose the grid and do a row check\n trans_game_state = numpy.transpose(self.game_state)\n for row in trans_game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_diag_win(player):\n # Left to right diagonal\n if player == self.game_state[0][0] == self.game_state[1][1] == self.game_state[2][2]:\n return True\n # Right to left diagonal\n if player == self.game_state[0][2] == self.game_state[1][1] == self.game_state[2][0]:\n return True\n return False\n\n if check_column_win(player) or check_diag_win(player) or check_row_win(player):\n return True\n return False",
"def check_combination(self, combination):\n\n # we first check if there are any pieces of the right value well placed.\n for j in range(0, 4):\n if combination[j] == self.answer[j]:\n self.try_return['well_placed'] += 1\n self.already_checked += [combination[j]]\n self.avoid += [j]\n\n for p in range(0, 4):\n for s in range(0, 4):\n if not p in self.avoid:\n if combination[s] == self.answer[p] and not combination[s] in self.already_checked:\n\n self.try_return['misplaced'] += 1\n self.duplicate += [combination[s]]\n if self.duplicate.count(combination[s]) > 1:\n self.try_return['misplaced'] -= 1",
"def can_complete_three_in_row(self, row_positions, board):\n\n row = [board.get_piece(row_positions[0][0], row_positions[0][1]), board.get_piece(row_positions[1][0], row_positions[1][1]), board.get_piece(row_positions[2][0], row_positions[2][1])]\n\n if row.count(' ') == 1 and row.count(self._piece) == 2:\n self_winner = row.index(' ')\n else:\n self_winner = -1\n\n\n if row.count(' ') == 1 and row.count(self._piece) == 0:\n opponent_winner = row.index(' ')\n else:\n opponent_winner = -1\n \n return (self_winner, opponent_winner)",
"def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player",
"def win(player1, player2):\n if(player1 == 1 and player2 == 3) or (player1 == 2 and player2 == 1) \\\n or (player1 == 3 and player2 == 2):\n return True",
"def check_board(board_state, player_symbol, display_message = False):\n\n is_board_completely_filled = board_state.isalpha()\n\n indices_set = set([ind+1 for ind, val in enumerate(board_state) if val == player_symbol])\n\n if {1, 2, 3}.issubset(indices_set) or {4, 5, 6}.issubset(indices_set) or {7, 8, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Row completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if {1, 4, 7}.issubset(indices_set) or {2, 5, 8}.issubset(indices_set) or {3, 6, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Column completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n if {1, 5, 9}.issubset(indices_set) or {3, 5, 7}.issubset(indices_set):\n\n if display_message:\n print(\"Diagonal completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if is_board_completely_filled:\n\n if display_message:\n print(\"Game is drawn...!\")\n\n return \"Draw\"\n\n return False",
"def check_won (grid):\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False",
"def check_won (grid):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False",
"def check_won (grid):\r\n p=0\r\n for k in range(len(grid)):\r\n for g in range(len(grid[k])): \r\n if grid[k][g]>=32:\r\n p+=1\r\n else:\r\n ()\r\n if p>0:\r\n return True\r\n else:\r\n return False",
"def checkAll(self, player, board):\n #retrieve current moves of the player who made the last move\n currentMoves = self.getPlayerMoves(player,board)\n\n #check column win\n is_col_win = self.checkWin(currentMoves, self.columnWins)\n if is_col_win != False:\n return True\n\n #check row win\n is_row_win = self.checkWin(currentMoves, self.rowWins)\n if is_row_win != False:\n return True\n\n #check diagonal win\n is_diag_win = self.checkWin(currentMoves, self.diagonalWins)\n if is_diag_win != False:\n return True\n else:\n return False",
"def check_won(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] >= 32:\r\n return True \r\n return False",
"def check_won(board,player):\n # X axis\n if (\n (len(set(board[1:4])) == 1 and ' ' not in set(board[1:4])) or\n (len(set(board[4:7])) == 1 and ' ' not in set(board[4:7])) or\n (len(set(board[7:10])) == 1 and ' ' not in set(board[7:10]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Y axis\n if (\n (len(set(board[1::3])) == 1 and ' ' not in set(board[1::3])) or\n (len(set(board[2::3])) == 1 and ' ' not in set(board[2::3])) or\n (len(set(board[3::3])) == 1 and ' ' not in set(board[3::3]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Diagonals\n if (\n (len(set(board[1::4])) == 1 and ' ' not in set(board[1::4])) or\n (len(set(board[3:9:2])) == 1 and ' ' not in set(board[3:9:2]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n\n return False",
"def win_condition(self, player):\n\n row_list = []\n column_list = []\n constant_condition = False\n row_sequential_condition = False\n column_sequential_condition = False\n\n # Loop through positions on board for player\n for position_key, position_obj in sorted(self.board.positions.items()):\n if position_obj.value == player.value:\n row_list.append(position_obj.row)\n column_list.append(position_obj.column)\n\n # Either row keys or column keys must stay constant\n row_set = set(row_list)\n column_set = set(column_list)\n if len(row_set) == 1 or len(column_set) == 1:\n constant_condition = True\n\n # The other row keys or column keys must be sequential for number of row or columns\n row_seq_list = [n for n in range(1, self.board.rows + 1)]\n column_seq_list = [n for n in range(1, self.board.columns + 1)]\n if row_list == row_seq_list:\n row_sequential_condition = True\n if column_list == column_seq_list:\n column_sequential_condition = True\n\n if constant_condition and (row_sequential_condition or column_sequential_condition):\n return True\n elif row_sequential_condition and column_sequential_condition:\n return True\n else:\n return False",
"def __is_board_full(self):\r\n for row in self.__board:\r\n if {self.PLAYER1, self.PLAYER2} & set(row) != 0:\r\n return False\r\n return True",
"def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False",
"def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None",
"def check_opponent_winning(self):\n valid_actions = self.get_valid_actions()\n copy_board = np.copy(self.board)\n for action in list(valid_actions):\n height = self.get_height(action, board=copy_board)\n self.set(action, height=height, value=self.current_player * -1, board=copy_board)\n\n if self.check_winner(copy_board, action, height) != 0:\n return True\n\n self.set(action, height=height, value=0, board=copy_board)\n\n return False",
"def collision(cubes, player):\n if player in cubes:\n return True\n else:\n return False",
"def checkSuitability(self, sequenceCoords, candidateCoords):\n\t\ts = range(sequenceCoords[0], sequenceCoords[1])\n\t\tc = range(candidateCoords[0], candidateCoords[1])\n\t\tss = set(s)\n\t\ti = ss.intersection(c)\n\t\treturn len(i) < EXON_LENGTH",
"def validate_pos(game: TowerDefenceSolver, position: Tuple[int, int], purchases_list: Purchases) -> bool:\n if (\n position[0] < 0\n or position[1] < 0\n or position[0] >= game.map_height\n or position[1] >= game.map_width\n or position in game.path\n ):\n return False\n\n for purchase in purchases_list:\n if purchase[\"coords\"] == position:\n return False\n\n return True",
"def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True",
"def is_won(self):\n combinations = [*[(i, i + 3, i + 6) for i in range(3)],\n *[(i*3, i*3 + 1, i*3 + 2) for i in range(3)],\n (0, 4, 8), (2, 4, 6)]\n\n win = [*filter(lambda x: self[x[0]] == self[x[1]] == self[x[2]] and\n self[x[0]] != self.CELL_EMPTY, combinations)]\n return self[win[0][0]] if len(win) > 0 else self.CELL_EMPTY"
] | [
"0.7056445",
"0.6785087",
"0.6513575",
"0.6407896",
"0.63359",
"0.6332282",
"0.6314953",
"0.62546915",
"0.62414443",
"0.6239754",
"0.6238734",
"0.61814517",
"0.6174631",
"0.6169825",
"0.61628115",
"0.6144622",
"0.61050117",
"0.60615724",
"0.60549986",
"0.6050816",
"0.60260415",
"0.6024108",
"0.6003962",
"0.59974927",
"0.5985932",
"0.59848505",
"0.5955237",
"0.59417844",
"0.59298456",
"0.5927045"
] | 0.72803354 | 0 |
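A minimal usage sketch of the _is_winning_combination helper defined in the row above. It is not part of the dataset row; the 3x3 board literal, the "X"/"O" markers and the "-" empty-cell marker are assumptions for illustration, and the helper itself is assumed to be in scope:

# Hand-built 3x3 board; "-" marks an empty cell (assumption, not taken from the row above).
board = [
    ["X", "X", "X"],
    ["O", "-", "O"],
    ["-", "O", "-"],
]
top_row = ((0, 0), (0, 1), (0, 2))  # one of the 8 winning combinations
print(_is_winning_combination(board, top_row, "X"))  # True: every cell in the top row holds "X"
print(_is_winning_combination(board, top_row, "O"))  # False: those cells hold "X", not "O"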
There are 8 possible combinations (3 horizontals, 3 verticals and 2 diagonals) to win the Tic-tac-toe game. This helper loops through all these combinations and checks if any of them belongs to the given player. | def _check_winning_combinations(board, player):
winning_combinations = (
((0, 0), (0, 1), (0, 2)),
((1, 0), (1, 1), (1, 2)),
((2, 0), (2, 1), (2, 2)),
((0, 0), (1, 0), (2, 0)),
((0, 1), (1, 1), (2, 1)),
((0, 2), (1, 2), (2, 2)),
((0, 0), (1, 1), (2, 2)),
((0, 2), (1, 1), (2, 0))
)
if any(combination for combination in winning_combinations if _is_winning_combination(board, combination, player)):
return player
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_winning_combination(board, combination, player):\n\n \"\"\"\n ### Code before refactoring into a comprehension list:\n\n for a_tuple in combination:\n\n # e.g. a_tuple = (0,0)\n # if board[0][0] != \"X\"\n if board[a_tuple[0]][a_tuple[1]] != player:\n\n return False\n \"\"\"\n\n if any(a_tuple for a_tuple in combination if board[a_tuple[0]][a_tuple[1]] != player):\n return False\n\n return True",
"def win_game(self):\n\n def horizontal_win():\n \"\"\"Return whether there is horizontal win\"\"\"\n\n for i in range(0, board_size):\n if set(self.board[i]) == set([o_symbol]) or set(self.board[i]) == set([x_symbol]):\n print \"horizontal win\"\n return True\n\n def vertical_win():\n \"\"\"Return whether there is vertical win\"\"\"\n\n vert_set = set()\n for i in range(0, board_size):\n for j in range(0, board_size):\n vert_set.add(self.board[j][i])\n if vert_set == set([o_symbol]) or vert_set == set([x_symbol]):\n print \"vertical win\"\n return True \n vert_set = set()\n\n def diagonal_win():\n \"\"\"Return whether there is diagonal win\"\"\"\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True\n\n if horizontal_win() or vertical_win() or diagonal_win():\n print \"You have won.\"\n return True",
"def check_win(self, player):\n def check_row_win(player):\n for row in self.game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_column_win(player):\n # For doing a column check, transpose the grid and do a row check\n trans_game_state = numpy.transpose(self.game_state)\n for row in trans_game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_diag_win(player):\n # Left to right diagonal\n if player == self.game_state[0][0] == self.game_state[1][1] == self.game_state[2][2]:\n return True\n # Right to left diagonal\n if player == self.game_state[0][2] == self.game_state[1][1] == self.game_state[2][0]:\n return True\n return False\n\n if check_column_win(player) or check_diag_win(player) or check_row_win(player):\n return True\n return False",
"def check_for_win(self, row, col, player): \n\n count = 0\n for i in range(0, len(self.board[0])):\n # Check vertical\n if self.board[row][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n\n count = 0\n for i in range(0, len(self.board)):\n # Check horisontal\n if self.board[:, col][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n \n count = 0\n totoffset = col - row\n for i in np.diagonal(self.board, offset=totoffset):\n # Check diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True\n\n count = 0\n mirrorboard = np.fliplr(self.board)\n col = self.colswitch[col]\n totoffset = col - row\n for i in np.diagonal(mirrorboard, offset=totoffset):\n # Check other diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True",
"def checkAll(self, player, board):\n #retrieve current moves of the player who made the last move\n currentMoves = self.getPlayerMoves(player,board)\n\n #check column win\n is_col_win = self.checkWin(currentMoves, self.columnWins)\n if is_col_win != False:\n return True\n\n #check row win\n is_row_win = self.checkWin(currentMoves, self.rowWins)\n if is_row_win != False:\n return True\n\n #check diagonal win\n is_diag_win = self.checkWin(currentMoves, self.diagonalWins)\n if is_diag_win != False:\n return True\n else:\n return False",
"def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None",
"def check_victory(board):\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n player = board[row][col]\n\n # not a player move\n if player == 0 or player == 9:\n continue\n\n # look right\n if col + 3 < WIDTH and player == board[row][col + 1] and player == board[row][col + 2]\\\n and player == board[row][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n if row + 3 < HEIGHT:\n\n # down\n if player == board[row + 1][col] and player == board[row + 2][col] and player == board[row + 3][col]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and right\n if col + 3 < WIDTH and player == board[row + 1][col + 1] and player == board[row + 2][col + 2]\\\n and player == board[row + 3][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and left\n if col - 3 >= 0 and player == board[row + 1][col - 1] and player == board[row + 2][col - 2] \\\n and player == board[row + 3][col - 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n\n # # if no one has won yet\n for row in range(HEIGHT):\n for col in range(WIDTH):\n if board[row][col] == 0 or board[row][col] == 9:\n return None\n\n return 0",
"def win(player1, player2):\n if(player1 == 1 and player2 == 3) or (player1 == 2 and player2 == 1) \\\n or (player1 == 3 and player2 == 2):\n return True",
"def check_won(board,player):\n # X axis\n if (\n (len(set(board[1:4])) == 1 and ' ' not in set(board[1:4])) or\n (len(set(board[4:7])) == 1 and ' ' not in set(board[4:7])) or\n (len(set(board[7:10])) == 1 and ' ' not in set(board[7:10]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Y axis\n if (\n (len(set(board[1::3])) == 1 and ' ' not in set(board[1::3])) or\n (len(set(board[2::3])) == 1 and ' ' not in set(board[2::3])) or\n (len(set(board[3::3])) == 1 and ' ' not in set(board[3::3]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Diagonals\n if (\n (len(set(board[1::4])) == 1 and ' ' not in set(board[1::4])) or\n (len(set(board[3:9:2])) == 1 and ' ' not in set(board[3:9:2]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n\n return False",
"def checkForWin(self, board, player):\n\t\tif ((board[0][0] == player and board[0][1] == player and board[0][2] == player) or\n\t\t\t(board[1][0] == player and board[1][1] == player and board[1][2] == player) or\n\t\t\t(board[2][0] == player and board[2][1] == player and board[2][2] == player) or\n\t\t\t(board[0][0] == player and board[1][1] == player and board[2][2] == player) or\n\t\t\t(board[0][2] == player and board[1][1] == player and board[2][0] == player) or\n\t\t\t(board[0][0] == player and board[1][0] == player and board[2][0] == player) or\n\t\t\t(board[0][1] == player and board[1][1] == player and board[2][1] == player) or\n\t\t\t(board[0][2] == player and board[1][2] == player and board[2][2] == player)):\n\t\t\tprint(\"----------------------------\")\n\t\t\tprint(\"Yay! Player%d is the winner!\" % player)\n\t\t\tprint(\"----------------------------\")\n\t\t\tself.win = player",
"def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player",
"def win_condition(self, player):\n\n row_list = []\n column_list = []\n constant_condition = False\n row_sequential_condition = False\n column_sequential_condition = False\n\n # Loop through positions on board for player\n for position_key, position_obj in sorted(self.board.positions.items()):\n if position_obj.value == player.value:\n row_list.append(position_obj.row)\n column_list.append(position_obj.column)\n\n # Either row keys or column keys must stay constant\n row_set = set(row_list)\n column_set = set(column_list)\n if len(row_set) == 1 or len(column_set) == 1:\n constant_condition = True\n\n # The other row keys or column keys must be sequential for number of row or columns\n row_seq_list = [n for n in range(1, self.board.rows + 1)]\n column_seq_list = [n for n in range(1, self.board.columns + 1)]\n if row_list == row_seq_list:\n row_sequential_condition = True\n if column_list == column_seq_list:\n column_sequential_condition = True\n\n if constant_condition and (row_sequential_condition or column_sequential_condition):\n return True\n elif row_sequential_condition and column_sequential_condition:\n return True\n else:\n return False",
"def win_check(table: list) -> (bool, str):\n # Combinations that would lead to a win\n win_list = [\n [0,1,2], [3,4,5],\n [6,7,8], [0,3,6],\n [1,4,7], [2,5,8],\n [0,4,8], [6,4,2],\n ]\n for line in win_list:\n # Check rows, columns, and diagonals\n combination = set([table[line[0]], table[line[1]], table[line[2]]])\n\n if len(combination) == 1 and combination != {\"-\"}: # Which mean we have a straight line of either X or O\n #unpack comb (which is 1 item), which is either \"X\" or \"O\" to know who won\n return True, *combination\n else:\n return False, None",
"def winFor(self,player):\n if(self.cachedWin == False):\n won = False;\n if(player==WHITE):\n for x in range(0,WIDTH):\n if(self.gameState[x,0]==WHITE):\n won = True\n \n elif(player==BLACK):\n for x in range(0,WIDTH):\n if(self.gameState[x,HEIGHT-1]==BLACK):\n won = True\n \n if(len(self.successors()) == 0):#IF there are no available moves for both players\n bCount = self.count(BLACK) #check who has the most pawns\n wCount = self.count(BLACK)\n if(bCount>wCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n if(wCount>bCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n \n if(won):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n else:\n return False\n else:\n return player == self.cachedWinner",
"def win():\r\n\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9, move1, move2, player1, player2\r\n\r\n\twin1 = tile1==tile2==tile3==1 or tile1==tile2==tile3==2\r\n\twin2 = tile4==tile5==tile6==1 or tile4==tile5==tile6==2\r\n\twin3 = tile7==tile8==tile9==1 or tile7==tile8==tile9==2\r\n\twin4 = tile1==tile4==tile7==1 or tile1==tile4==tile7==2\r\n\twin5 = tile2==tile5==tile8==1 or tile2==tile5==tile8==2\r\n\twin6 = tile3==tile6==tile9==1 or tile3==tile6==tile9==2\r\n\twin7 = tile1==tile5==tile9==1 or tile1==tile5==tile9==2\r\n\twin8 = tile3==tile5==tile7==1 or tile3==tile5==tile7==2\r\n\r\n\twin = win1 or win2 or win3 or win4 or win5 or win6 or win7 or win8\r\n\treturn win",
"def check_win(self, player):\n for win_pos in TicTacToe.win_pos:\n # for each winning position defined we take the set difference to the positions played be player\n # if there are not elements left after resulting set after difference operator\n # we get False as return. ie he has placed his marker in the winning positions which in turn makes him\n # the winner\n if not win_pos.difference(self.player_played_pos[player]):\n return True\n\n # if after checking for every winning positions if the control still reaches here,\n # the player has not marked the winning positions. returns False\n return False",
"def winning_event(self, player):\n # vertical check\n for col in range(GameData.columns):\n if self.board[0][col] == player and self.board[1][col] == player and self.board[2][col] == player:\n self.draw_vertical_winning_line(col, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # horizontal check\n for row in range(GameData.rows):\n if self.board[row][0] == player and self.board[row][1] == player and self.board[row][2] == player:\n self.draw_horizontal_winning_line(row, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # ascending diagonal heck\n if self.board[2][0] == player and self.board[1][1] == player and self.board[0][2] == player:\n self.draw_asc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # descending diagonal win chek\n if self.board[0][0] == player and self.board[1][1] == player and self.board[2][2] == player:\n self.draw_desc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n return False",
"def _check_winner_3d(self, board, action, height, player=None):\n slices = []\n slices.append(board[action[0], :, :])\n slices.append(board[:, action[1], :])\n slices.append(board[:, :, height])\n # todo: stack with a loop for Score N. Also, these slices don't have to be checked all the time, maybe add some if-conditions\n slices.append(np.stack((board[0, 0, :], board[1, 1, :], board[2, 2, :], board[3, 3, :]), axis=0))\n slices.append(np.stack((board[0, 3, :], board[1, 2, :], board[2, 1, :], board[3, 0, :]), axis=0))\n\n temp = 0\n for slice in slices:\n temp = self.check_combo(slice, player)\n if temp != 0:\n break\n winner = temp\n\n #game_over = winner != 0 or len(np.argwhere(self.board).reshape(-1, )) == 0\n return winner",
"def get_winner(board):\n\n def who_won(in_a_row, board_size, cur_player):\n \"\"\" \n a function private to get_winner() (yes you can do this. Cool huh!?) \n that tells get_winner if it has a winner \n \"\"\"\n if in_a_row == board_size:\n return 1 if cur_player == 'X' else 2\n else:\n return 0\n\n def test_row_col(board, rows):\n \"\"\" private function to test the rows and columns \"\"\"\n for i in range(len(board)):\n cur_player = board[i][0] if rows else board[0][i]\n in_a_row = 0\n for j in range(len(board)):\n symbol = board[i][j] if rows else board[j][i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1\n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n def test_diagonal(board, normal):\n \"\"\" private function to test the two diagonals \"\"\"\n cur_player = board[0][0] if normal else board[0][len(board)-1]\n in_a_row = 0\n for i in range(len(board)):\n symbol = board[i][i] if normal else board[i][len(board)-1-i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1 \n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n\n # test rows\n winner = test_row_col(board, True)\n if not winner == 0:\n return winner\n\n # test cols\n winner = test_row_col(board, False)\n if not winner == 0:\n return winner\n\n # test diagonal from top left to bottom right\n winner = test_diagonal(board, True)\n if not winner == 0:\n return winner\n\n # test diagonal from top right to bottom left\n winner = test_diagonal(board, False)\n if not winner == 0:\n return winner\n\n return 0",
"def victory_check(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get the possible ways to win\n possible_wins = board.get_wins(affinity)\n \n # if we can win, pick a good win \n if len(possible_wins) == 1: return possible_wins[0]\n elif len(possible_wins) > 1:\n best_win = None\n wins_by_x = {}\n wins_by_y = {}\n for win in possible_wins:\n if win[0] not in wins_by_x.keys():\n wins_by_x[win[0]] = []\n if win[1] not in wins_by_y.keys():\n wins_by_y[win[1]] = []\n wins_by_x[win[0]].append(win)\n wins_by_y[win[1]].append(win)\n for y in wins_by_y:\n if len(wins_by_y[y]) > 1: \n for win in wins_by_y[y]:\n if best_win is None or win[0] < best_win[0]:\n best_win = win \n return best_win\n\n else: return None",
"def check_victory(board):\n\n for idx in range(3):\n if board[idx][0] != ' ' and board[idx][0] == board[idx][1] == board[idx][2]:\n # This checks if all items in each horizontal row is complete.\n print('Victory to ' + board[idx][0])\n return True\n elif board[0][idx] != ' ' and board[0][idx] == board[1][idx] == board[2][idx]:\n # This checks if all the items in each vertical column is complete.\n print('Victory to ' + board[0][idx])\n return True\n\n if board[0][0] != ' ' and board[0][0] == board[1][1] == board[2][2]:\n # This checks if the left to right diagonal is complete.\n print('Victory to ' + board[0][0])\n return True\n elif board[2][0] != ' ' and board[2][0] == board[1][1] == board[0][2]:\n # This checks if the right to left diagonal is complete.\n print('Victory to ' + board[2][0])\n return True\n\n return False",
"def is_winner(self, player, cell):\n \n column = cell % 3\n row = cell - (cell % 3)\n diagonal = cell % 2 == 0\n \n victory = False\n \n cells, boards = zip(*self.cells)\n \n if diagonal:\n victory = victory or \\\n all([c == player for c in cells[0:9:4]]) or \\\n all([c == player for c in cells[2:8:2]])\n \n victory = victory or \\\n all([c == player for c in cells[column:9:3]]) or \\\n all([c == player for c in cells[row:row+3]])\n \n return victory\n return False",
"def col_win(board, player):\n for row in board.T:\n if check_row(row, player):\n return True\n return False",
"def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West\n victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked\n if difficulty >= 2: #If difficulty is at least 2\n ## Cody -- you could just write:\n ## for slots in victory_conditions\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops \n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n ## This you can do even more efficiently using a beautiful syntax called\n ## \"list comprehension\" which entered python some years ago -- watch\n ## me do it in one line:\n ## check = [current_board[s] for s in slots]\n if check.count(AI_symbol)==2 and check.count(\" \")==1: #If there are any rows where the AI has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n ## Oops -- you repeat the code again here for no reason\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(opponent_symbol)==2 and check.count(\" \")==1: #If there are any rows where the opponent has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n if difficulty >= 3: #If difficulty is at least 3\n ## It looks like you're doing an identical loop here -- I\n ## wonder why you don't move the if statement inside the loop\n ## -- I believe that would significantly shorten your code\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(AI_symbol)==1 and check.count(\" \")==2: #If there are any rows where the AI has one symbol and there's two empty spots\n if check[0] == \" \": #If the first slot from check is empty\n return(slots[0]) #Return the first slot\n else: \n return(slots[2]) #Return the third slot\n if difficulty == 4: #If difficulty is 4\n if current_board[4] == \" \": #If the center is empty\n return(4) #Take the center\n elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == \" \": #Else, if a corner is open\n corners = 2*random.randint(0,4) #Selects a random corner (or center, which will reject)\n while current_board[corners] != \" \": #Until the corner selected is empty\n corners = 2*random.randint(0,4) #Select a new corner or center\n return(corners) #Return empty corner\n else:\n sides = 2*random.randint(0,3)+1 #Selects a side\n while current_board[sides] != \" \": #Until the side is empty\n sides = 2*random.randint(0,3)+1 #Selects a new side\n return(sides) #Returns empty side\n if difficulty < 4: #If difficulty is less than 4\n ran = 
random.randint(0,8) #Picks random spot on board\n while current_board[ran] != \" \": #Until the spot is empty\n ran = random.randint(0,8) #Picks a new spot\n return(ran) #Returns empty spot",
"def check_win(self):\r\n wins = [self.check_rows(), self.check_cols(), self.check_diag()]\r\n for case, pos in wins:\r\n if case != -1:\r\n print('Game over!')\r\n if self.grid[case][-1] == self.computer:\r\n print('The computer won!')\r\n return (True, pos)\r\n print('The player won!')\r\n return (True, pos)\r\n\r\n return (self.check_draw(), None)",
"def isSolved(board):\n for player in [1, 2]:\n if [player]*3 in chain(\n board, # Rows\n zip(board), # Columns\n [ # Diagonals\n [board[i][i] for i in range(len(board))],\n [board[len(board) - i - 1][i] for i in range(len(board))]\n ]\n ):\n return player\n return -1 if 0 in chain(*board) else 0",
"def __check_winner(self):\n for i in range(0, 3):\n col = self.__get_col(i)\n if col.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if col.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n row = self.__get_row(i)\n if row.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if row.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if diag.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if diag.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n if self.state.count(' ') == 0:\n print('\\nDraw!')\n self.game_ended = True",
"def is_winning(game: List[int]) -> bool:\n # performs the Vertical XOR by reducing as list of bool (lst) with xor lambda\n reduce_xor = (lambda lst: reduce(__xor__, lst, False))\n\n # converts game into binary and the converts/permutes the row and col\n game_bin_row_col = row_to_col(game_to_bin(game))\n\n # performs Vertical XOR on every column\n res_vert_xor = list(map(reduce_xor, game_bin_row_col))\n\n return reduce(__or__, res_vert_xor, False)",
"def TicTacToe(): #Written by Cody West\n current_board = [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"] #Empty board\n players = 0 #Number of players\n human_turn = 0 #Indicates whether the human goes first or second (is 0 for two player games)\n turn = 1 #Turn number\n while players != 1 and players != 2: #While a valid number of players has not been chosen\n players = int(raw_input(\"How many players are there?\")) #Asks how many players there are\n if players < 1 or players > 2: #If the choice is not valid\n print(\"Please pick 1 or 2 players\") #Prints error message\n if players == 1: #If 1 player\n difficulty = 0 #Difficulty variable\n while difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #While a valid difficulty has not been chose\n difficulty = int(raw_input(\"Pick a difficulty. 1 is easiest, 4 is hardest\")) #Ask for a difficulty\n if difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #If difficulty choice is not valid\n print(\"Please pick a difficulty between 1 and 4\") #Prints error message\n while human_turn != 1 and human_turn != 2: #While a human turn has not been chosen\n human_turn = int(raw_input(\"Would you like to go first (1) or second (2)?\")) #Ask for human turn\n if human_turn != 1 and human_turn != 2: #If a valid turn is not chosen\n print(\"Please pick turn 1 or 2\") #Print error message\n if human_turn == 1: #If human goes first\n player1 = \"human\" #Player 1 is human\n player2 = \"AI\" #Player 2 is AI\n elif human_turn == 2: #If human goes second\n player1 = \"AI\" #Player 1 is AI\n player2 = \"human\" #Player 2 is human\n else: #If neither\n player1 = \"human\" #Player 1 is human\n player2 = \"human\" #Player 2 is human\n while turn < 10: #While the number of turns in Tic Tac Toe has not been exceeded\n if turn < 3: #For the first three turns\n draw_example_board() #Draw a board showing the slot numbers\n draw_board(current_board) #Draw current board\n ## You could write this logic much more compactly -- try to avoid having so many\n ## lines of code that look identical. You have four different update_board calls\n ## here where you could have just one.\n if turn%2 == 1: #If it's an odd numbered turn\n if player1 == \"human\":\n print(\"human\")\n update_board(current_board, get_input(current_board, turn), \"X\") #Update board with player 1's selection and X\n else:\n print(\"AI\")\n update_board(current_board, AI(current_board,\"X\",\"O\", difficulty), \"X\") #Update board with AI selection\n else:\n if player2 == \"human\":\n print(\"human\")\n update_board(current_board, get_input(current_board, turn), \"O\") #Update board with player 2's selection and X\n else:\n print(\"AI\")\n update_board(current_board, AI(current_board,\"O\",\"X\", difficulty), \"O\") #Update board with AI selection\n if check_victory(current_board) == \"done\":\n return \"whatever\"#Check victory\n turn = turn + 1 #Increase turn number",
"def check_board(board_state, player_symbol, display_message = False):\n\n is_board_completely_filled = board_state.isalpha()\n\n indices_set = set([ind+1 for ind, val in enumerate(board_state) if val == player_symbol])\n\n if {1, 2, 3}.issubset(indices_set) or {4, 5, 6}.issubset(indices_set) or {7, 8, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Row completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if {1, 4, 7}.issubset(indices_set) or {2, 5, 8}.issubset(indices_set) or {3, 6, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Column completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n if {1, 5, 9}.issubset(indices_set) or {3, 5, 7}.issubset(indices_set):\n\n if display_message:\n print(\"Diagonal completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if is_board_completely_filled:\n\n if display_message:\n print(\"Game is drawn...!\")\n\n return \"Draw\"\n\n return False"
] | [
"0.71583956",
"0.70575804",
"0.7002491",
"0.6714407",
"0.669649",
"0.66591203",
"0.66099006",
"0.6589296",
"0.6558027",
"0.6533149",
"0.6491051",
"0.6489824",
"0.644219",
"0.6395233",
"0.6353411",
"0.63451284",
"0.6342977",
"0.6325276",
"0.62973696",
"0.6287886",
"0.6280949",
"0.6278432",
"0.6277261",
"0.62752616",
"0.6262734",
"0.62560654",
"0.6245277",
"0.6235003",
"0.62217224",
"0.618498"
] | 0.76965 | 0 |
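The 8 combinations hard-coded in _check_winning_combinations above follow a regular pattern (3 rows, 3 columns, 2 diagonals). A small illustrative sketch, not part of the dataset row, that derives the same tuple for an n x n board:

def winning_combinations(n=3):
    # Rows, columns, then the two diagonals; n=3 reproduces the 8 tuples listed above.
    rows = [tuple((r, c) for c in range(n)) for r in range(n)]
    cols = [tuple((r, c) for r in range(n)) for c in range(n)]
    diagonals = [tuple((i, i) for i in range(n)),
                 tuple((i, n - 1 - i) for i in range(n))]
    return tuple(rows + cols + diagonals)

assert len(winning_combinations(3)) == 8  # 3 rows + 3 columns + 2 diagonals
assert ((0, 0), (1, 1), (2, 2)) in winning_combinations(3)  # main diagonal, as in the row above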
Used to build the gender branch of our face recognition network. This branch is composed of three Conv -> BN -> Pool -> Dropout blocks, followed by the Dense output layer. | def build_gender_branch(self, inputs, num_genders=2):
x = Lambda(lambda c: tf.image.rgb_to_grayscale(c))(inputs)
x = self.make_default_hidden_layers(inputs)
x = Flatten()(x)
x = Dense(128)(x)
x = Activation("relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(num_genders)(x)
x = Activation("sigmoid", name="gender_output")(x)
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)",
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def discriminator_block(in_filters, out_filters, f_size=4, normalize=True,stride=2):\n layers = [nn.Conv2d(in_filters, out_filters, f_size, stride=stride, padding=0)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block",
"def discriminator_block(in_filters, out_filters, stride, normalize):\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = channels\n for out_filters, stride, normalize in [ (64, 2, False),\n (128, 2, True),\n (256, 2, True),\n (512, 1, True)]:\n layers.extend(discriminator_block(in_filters, out_filters, stride, normalize))\n in_filters = out_filters\n\n layers.append(nn.Conv2d(out_filters, 1, 3, 1, 1))\n\n self.model = nn.Sequential(*layers)\n\n \"\"\"CycleGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, normalize=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *discriminator_block(in_channels, 64, normalize=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n *discriminator_block(256, 512),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(512, 1, 4, padding=1)\n )\n\n \"\"\"DCGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Sequential( nn.Linear(128*ds_size**2, 1),\n nn.Sigmoid())\n\n\n \"\"\"DiscoGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, normalization=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *discriminator_block(in_channels, 64, normalization=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(256, 1, 4, padding=1)\n )\n\n \"\"\"DraGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Sequential( nn.Linear(128*ds_size**2, 1),\n nn.Sigmoid())\n\n \"\"\"EBGAN\n \"\"\"\n # Upsampling\n self.down = nn.Sequential(\n nn.Conv2d(opt.channels, 64, 3, 2, 1),\n nn.ReLU(),\n )\n # Fully-connected layers\n self.down_size = (opt.img_size // 2)\n down_dim = 64 * (opt.img_size // 2)**2\n\n self.embedding = nn.Linear(down_dim, 32)\n\n self.fc = nn.Sequential(\n nn.BatchNorm1d(32, 0.8),\n nn.ReLU(inplace=True),\n nn.Linear(32, down_dim),\n nn.BatchNorm1d(down_dim),\n nn.ReLU(inplace=True)\n )\n # Upsampling\n self.up 
= nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(64, opt.channels, 3, 1, 1)\n )\n\n \"\"\"InfoGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n \"\"\"Returns layers of each discriminator block\"\"\"\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.conv_blocks = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n\n # Output layers\n self.adv_layer = nn.Sequential(nn.Linear(128*ds_size**2, 1))\n self.aux_layer = nn.Sequential(\n nn.Linear(128*ds_size**2, opt.n_classes),\n nn.Softmax()\n )\n self.latent_layer = nn.Sequential(nn.Linear(128*ds_size**2, opt.code_dim))\n\n \"\"\"LSGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Linear(128*ds_size**2, 1)\n\n \"\"\"Pix2Pix\n \"\"\"\n def discriminator_block(in_filters, out_filters, normalization=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *discriminator_block(in_channels*2, 64, normalization=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n *discriminator_block(256, 512),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(512, 1, 4, padding=1, bias=False)\n )\n\n \"\"\"Pixelda\n \"\"\"\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1))\n\n \"\"\"SRGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, stride, normalize):\n \"\"\"Returns layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = in_channels\n for out_filters, stride, normalize in [ (64, 1, False),\n (64, 2, True),\n (128, 1, True),\n (128, 2, True),\n (256, 1, True),\n (256, 2, True),\n (512, 1, True),\n (512, 2, True),]:\n layers.extend(discriminator_block(in_filters, out_filters, stride, normalize))\n in_filters = out_filters\n\n # Output layer\n layers.append(nn.Conv2d(out_filters, 1, 3, 1, 1))\n\n self.model = nn.Sequential(*layers)\n\n \"\"\"StarGAN\n \"\"\"\n channels, img_size, _ = img_shape\n\n def 
discriminator_block(in_filters, out_filters):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers\n\n layers = discriminator_block(channels, 64)\n curr_dim = 64\n for _ in range(n_strided - 1):\n layers.extend(discriminator_block(curr_dim, curr_dim*2))\n curr_dim *= 2\n\n self.model = nn.Sequential(*layers)\n\n # Output 1: PatchGAN\n self.out1 = nn.Conv2d(curr_dim, 1, 3, padding=1, bias=False)\n # Output 2: Class prediction\n kernel_size = img_size // 2**n_strided\n self.out2 = nn.Conv2d(curr_dim, c_dim, kernel_size, bias=False)\n\n \"\"\"WGAN\n \"\"\"\n nn.Linear(int(np.prod(img_shape)), 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1)\n\n \n\n\n# import torch.nn as nn\n# import torch\n# from torch.nn.modules import conv, Linear\n# import torch.nn.functional as F\n# from src.snlayers.snconv2d import SNConv2d\n\n# class _netG(nn.Module):\n# def __init__(self, nz, nc, ngf):\n# super(_netG, self).__init__()\n# self.main = nn.Sequential(\n# # input is Z, going into a convolution\n# nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=True),\n# nn.BatchNorm2d(ngf * 8),\n# nn.ReLU(True),\n# # state size. (ngf*8) x 4 x 4\n# nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=True),\n# nn.BatchNorm2d(ngf * 4),\n# nn.ReLU(True),\n# # state size. (ngf*4) x 8 x 8\n# nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=True),\n# nn.BatchNorm2d(ngf * 2),\n# nn.ReLU(True),\n# # state size. (ngf*2) x 16 x 16\n# nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=True),\n# nn.BatchNorm2d(ngf),\n# nn.ReLU(True),\n# # state size. (ngf) x 32 x 32\n# nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=True),\n# nn.Tanh()\n# # state size. (nc) x 32 x 32\n# )\n\n# def forward(self, input):\n# output = self.main(input)\n# return output\n\n# # Actor\n# class _netE(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netE, self).__init__()\n\n# self.main = nn.Sequential(\n# SNConv2d(nc, ndf, 7, 4, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf, 3, 7, 4, 1, bias=False),\n# nn.Sigmoid()\n# )\n# def forward(self, input):\n# output = self.main(input) \n# return output.view(-1, 3).squeeze(1)\n\n# class _netD1(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netD1, self).__init__()\n\n self.main = nn.Sequential(\n SNConv2d(nc, ndf, 5, 2, 2), \n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf, ndf * 2, 5, 2, 2),\n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf * 2, ndf * 4, 5, 2, 2),\n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf * 4, ndf * 8, 5, 2, 2),\n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf * 8, 1, 4),\n nn.Sigmoid()\n\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(ndf * 4, ndf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*16) x 4 x 4\n nn.Conv2d(ndf * 16, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n # )\n # def forward(self, input):\n # output = self.main(input)\n # output = output.view(-1, 1).squeeze(1)\n # return output\n\n# class _netD2(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netD2, self).__init__()\n\n# self.main = nn.Sequential(\n# SNConv2d(nc, ndf, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf, ndf, 16, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # ndf x 30 x 30\n\n# SNConv2d(ndf, ndf * 2, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 2, ndf * 2, 16, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # (ndf * 2) x 9 x 9\n\n# SNConv2d(ndf * 2, ndf * 4, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 4, 1, 9, 1, 0, bias=False),\n# nn.Sigmoid()\n# # 1 x 1 x 1\n# )\n# def forward(self, input):\n# output = self.main(input)\n# return output.view(-1, 1).squeeze(1)\n\n# class _netD3(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netD3, self).__init__()\n\n# self.main = nn.Sequential(\n# # input is (nc) x 32 x 32\n# SNConv2d(nc, ndf, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf, ndf, 4, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # state size. (ndf) x 1 x 32\n# SNConv2d(ndf, ndf * 2, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf*2, ndf * 2, 4, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # state size. (ndf*2) x 16 x 16\n# SNConv2d(ndf * 2, ndf * 4, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 4, ndf * 4, 4, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n\n# SNConv2d(ndf * 4, ndf * 8, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=True),\n\n# # state size. (ndf*8) x 4 x 4\n# SNConv2d(ndf * 8, ndf * 16, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 16, 1, 4, 1, 0, bias=False),\n# nn.Sigmoid()\n# )\n# def forward(self, input):\n# output = self.main(input)\n# output = output.view(-1, 1).squeeze(1)\n# return output\n\n\n# _netD_list = [_netD1]",
"def discriminator_block(in_filters, out_filters, normalize=True):\r\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\r\n if normalize:\r\n layers.append(nn.InstanceNorm2d(out_filters))\r\n layers.append(nn.LeakyReLU(0.2, inplace=True))\r\n return layers",
"def discriminator_block(in_filters, out_filters, normalization=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def discriminator_block(in_filters, out_filters, stride, normalize):\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def DCGAN_discriminator(img_dim, nb_patch, bn_mode, model_name=\"DCGAN_discriminator\", use_mbd=True):\n\n list_input = [Input(shape=img_dim, name=\"disc_input_%s\" % i) for i in range(nb_patch)]\n\n if K.image_dim_ordering() == \"th\":\n bn_axis = 1\n else:\n bn_axis = -1\n\n nb_filters = 64\n nb_conv = int(np.floor(np.log(img_dim[1]) / np.log(2)))\n list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]\n\n # First conv\n x_input = Input(shape=img_dim, name=\"discriminator_input\")\n # x = Convolution2D(list_filters[0], 3, 3, subsample=(2, 2), name=\"disc_conv2d_1\", border_mode=\"same\")(x_input)\n # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n # x = LeakyReLU(0.2)(x)\n\n x = MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(x_input)\n x = Convolution2D(\n list_filters[0]/8, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same', name='disc_conv2d_1')(x)\n x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n e1 = Convolution2D(\n list_filters[0]/2, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n e2 = Convolution2D(\n list_filters[0]/2, 3, 3, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n x = merge(\n [e1, e2], mode='concat', concat_axis=bn_axis)\n\n # Next convs\n for i, f in enumerate(list_filters[1:]):\n name = \"disc_conv2d_fire_%s\" % (i + 2)\n # x = Convolution2D(f, 3, 3, subsample=(2, 2), name=name, border_mode=\"same\")(x)\n # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n # x = LeakyReLU(0.2)(x)\n x = MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(x)\n x = Convolution2D(\n f/8, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same', name=name)(x)\n x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n e1 = Convolution2D(\n f/2, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n e2 = Convolution2D(\n f/2, 3, 3, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n x = merge(\n [e1, e2], mode='concat', concat_axis=bn_axis)\n\n x_flat = Flatten()(x)\n x = Dense(2, activation='softmax', name=\"disc_dense\")(x_flat)\n\n PatchGAN = Model(input=[x_input], output=[x, x_flat], name=\"PatchGAN\")\n print(\"PatchGAN summary\")\n PatchGAN.summary()\n\n x = [PatchGAN(patch)[0] for patch in list_input]\n x_mbd = [PatchGAN(patch)[1] for patch in list_input]\n\n if len(x) > 1:\n x = merge(x, mode=\"concat\", name=\"merge_feat\")\n else:\n x = x[0]\n\n if use_mbd:\n if len(x_mbd) > 1:\n x_mbd = merge(x_mbd, mode=\"concat\", name=\"merge_feat_mbd\")\n else:\n x_mbd = x_mbd[0]\n\n num_kernels = 100\n dim_per_kernel = 5\n\n M = Dense(num_kernels * dim_per_kernel, bias=False, activation=None)\n MBD = Lambda(minb_disc, output_shape=lambda_output)\n\n x_mbd = M(x_mbd)\n x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd)\n x_mbd = MBD(x_mbd)\n x = merge([x, x_mbd], mode='concat')\n\n x_out = Dense(2, activation=\"softmax\", name=\"disc_output\")(x)\n\n discriminator_model = Model(input=list_input, output=[x_out], name=model_name)\n\n return discriminator_model",
"def discriminator_block(in_filters, out_filters, stride, normalize):\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def discriminator_model_organs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (512, 512, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L: 512 x 512 x 1 # G: 256 x 256 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 256 x 256 x 128 # G: 128 x 128 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # L: 128 x 128 x 256 # G: 64 x 64 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C256\n d3 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # L: 64 x 64 x 256 # G: 32 x 32 x 256 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n\n # C512\n d4 = Conv2D(filters=512, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # L: 61 x 61 x 512 # G: 29 x 29 x 512 # RF: 46\n d4 = BatchNormalization()(d4)\n d4 = LeakyReLU(alpha=0.2)(d4)\n d4 = ZeroPadding2D()(d4) # L: 63 x 63 x 512 # G: 31 x 31 x 512\n\n # Patch output\n d5 = Conv2D(filters=1, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d4) # L: 60 x 60 x 1 # G: 28 x 28 x 1 # RF: 70\n output_patch = Activation('sigmoid')(d5)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model",
"def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator",
"def VGGModel(input_shape):\n \n\n X_input = Input(input_shape)\n \n # Creating a Neural Network (VGG-16)\n\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(X_input)\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(X)\n\n # Block 2\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(X)\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(X)\n\n # Block 3\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(X)\n\n # Block 4\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(X)\n\n # Block 5\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(X)\n \n X = Flatten()(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc')(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc2')(X)\n X = Dense(2048, activation='relu', kernel_initializer = 'he_normal', name='fc3')(X)\n X = Dense(1024, activation='relu', kernel_initializer = 'he_normal', name='fc4')(X)\n X = Dense(512, activation='relu', kernel_initializer = 'he_normal', name='fc5')(X)\n X = Dense(256, activation='relu', kernel_initializer = 'he_normal', name='fc6')(X)\n X = Dense(2, activation='linear', name='regression')(X)\n model = Model(inputs=X_input, outputs = X, name='HappyModel')\n print(model.summary())\n \n return model",
"def gyf_net(\n inputs,\n backbone_layers,\n num_classes,\n option = 1,\n do_dropout = False,\n nd_weights=[ 0, 0, 0.01 , 0.01] ,\n wd_weights=[ 0, 0, 0.01, 0.01],\n name='gyf_net',\n FC_num_of_nuerons = 128\n):\n dropout_param = 0.5\n\n C3, C4, C5 = backbone_layers\n\n if option == 'reg_baseline_c5_dubreshko':\n GlobalAvgPool_features = keras.layers.GlobalAveragePooling2D()(C5)\n FC_regression = keras.layers.Dense(1024, name='FC_regression', activation='relu',\n kernel_regularizer=keras.regularizers.l2(wd_weights[2]),\n activity_regularizer=keras.regularizers.l2(nd_weights[2]))(GlobalAvgPool_features)\n FC_regression2 = keras.layers.Dense(512, name='FC_regression2', activation='relu',\n kernel_regularizer=keras.regularizers.l2(wd_weights[3]),\n activity_regularizer=keras.regularizers.l2(nd_weights[3]))(FC_regression)\n\n outputs = keras.layers.Dense(1, name='regression')(FC_regression2)\n\n if option== 'reg_baseline_c5':\n GlobalAvgPool_features = keras.layers.GlobalAveragePooling2D()(C5)\n FC_regression = keras.layers.Dense(1024, name='FC_regression', activation='relu')(GlobalAvgPool_features)\n if do_dropout:\n FC_regression = keras.layers.Dropout(dropout_param)(FC_regression)\n\n FC_regression2 = keras.layers.Dense(512, name='FC_regression2', activation='relu')(FC_regression)\n if do_dropout:\n FC_regression2 = keras.layers.Dropout(dropout_param)(FC_regression2)\n\n outputs = keras.layers.Dense(1, name='regression')(FC_regression2)\n\n if option== 'reg_fpn_p3':\n p3 = create_p3_feature(C3, C4, C5)\n GlobalAvgPool_features = keras.layers.GlobalAveragePooling2D()(p3)\n FC_regression = keras.layers.Dense(128, name='FC_regression', activation='relu')(GlobalAvgPool_features)\n FC_regression2 = keras.layers.Dense(64, name='FC_regression2', activation='relu')(FC_regression)\n outputs = keras.layers.Dense(1, name='regression')(FC_regression2)\n\n if option== 'reg_fpn_p3_p7_avg':\n # compute pyramid features as per https://arxiv.org/abs/1708.02002\n features = create_pyramid_features(C3, C4, C5)\n FC_submodel = submodel_single_out(do_dropout=do_dropout, dropout_param=dropout_param)\n outputs = [FC_submodel(GAF) for GAF in features]\n\n if option== 'reg_fpn_p3_p7_min_sig' or option== 'reg_fpn_p3_p7_mle' or option== 'reg_fpn_p3_p7_min_sig_L1' or option=='reg_fpn_p3_p7_mle_L1':\n # compute pyramid features as per https://arxiv.org/abs/1708.02002\n features = create_pyramid_features(C3, C4, C5)\n FC_submodel = submodel(do_dropout=do_dropout, dropout_param=dropout_param)\n outputs = [FC_submodel(GAF) for GAF in features]\n\n if option== 'reg_fpn_p3_p7_min_sig' or option== 'reg_fpn_p3_p7_mle' or option== 'reg_fpn_p3_p7_min_sig_L1' or option=='reg_fpn_p3_p7_mle_L1':\n # compute pyramid features as per https://arxiv.org/abs/1708.02002\n features = create_pyramid_features(C3, C4, C5)\n FC_submodel = submodel(do_dropout=do_dropout, dropout_param=dropout_param)\n outputs = [FC_submodel(GAF) for GAF in features]\n\n\n return keras.models.Model(inputs=inputs, outputs=outputs, name=name)",
"def VGGFace(input_shape=(224, 224, 3), n_classes=10, include_top=True):\n # Create the Tensor\n input = Input(shape=input_shape)\n\n # Block 1\n # 1st Convolutional Layer\n x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='block1_conv1')(input)\n x = Activation('relu', name='block1_relu1')(x)\n\n # 2nd Convolutional Layer\n x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='block1_conv2')(x)\n x = Activation('relu', name='block1_relu2')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n # 3rd Convolutional Layer\n x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='block2_conv1')(x)\n x = Activation('relu', name='block2_relu1')(x)\n\n # 4th Convolutional Layer\n x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='block2_conv2')(x)\n x = Activation('relu', name='block2_relu2')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n # 5th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv1')(x)\n x = Activation('relu', name='block3_relu1')(x)\n\n # 6th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv2')(x)\n x = Activation('relu', name='block3_relu2')(x)\n\n # 7th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv3')(x)\n x = Activation('relu', name='block3_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n # 8th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv1')(x)\n x = Activation('relu', name='block4_relu1')(x)\n\n # 9th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv2')(x)\n x = Activation('relu', name='block4_relu2')(x)\n\n # 10th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv3')(x)\n x = Activation('relu', name='block4_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n # 11th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv1')(x)\n x = Activation('relu', name='block5_relu1')(x)\n\n # 12th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv2')(x)\n x = Activation('relu', name='block5_relu2')(x)\n\n # 13th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv3')(x)\n x = Activation('relu', name='block5_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block5_pool')(x)\n\n # Block 6\n # 14th Convulation Layer\n x = Conv2D(4096, (7, 7), strides=(1, 1), name='fc1_conv1')(x)\n x = Activation('relu', name='fc1_relu1')(x)\n x = Dropout(0.5)(x)\n\n # 15th Convulation Layer\n x = Conv2D(4096, (1, 1), strides=(1, 1), name='fc2_conv1')(x)\n x = Activation('relu', name='fc2_relu2')(x)\n x = Dropout(0.5, name='fc2_dropout')(x)\n\n # 16th Convulation Layer\n x = Conv2D(2622, (1, 1), strides=(1, 1), name='fc3_conv1')(x)\n x = Flatten(name='fc3_flatten')(x)\n\n if include_top:\n # Output Layer\n x = Activation('softmax', name='predictions_softmax')(x)\n\n # Create model\n model = keras.models.Model(input, x, name='vggface')\n return model",
"def discriminator_block(in_filters, out_filters, normalization=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def discriminator_block(in_filters, out_filters, normalization=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def __init__(self, num_gpus):\n\n super(Discriminator, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is image\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = 1\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n nn.Sigmoid()\n )",
"def _build(self, generation):\n with tf.variable_scope ('discriminator') as scope:\n \n image_unflatten = unflatten_layer ( self.images )\n gen_unflatten = unflatten_layer ( generation )\n\n # Conv Layer 1 - image\n conv1_out_image, params = conv_2d_layer (\n input = image_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'conv_1_img',\n visualize = True ) \n pool1_out_img = max_pool_2d_layer ( input = conv1_out_image, name = 'pool_1_img')\n lrn1_out_img = local_response_normalization_layer (pool1_out_img, name = 'lrn_1_img' ) \n \n # Conv Layer 1 - gen\n conv1_out_gen, params = conv_2d_layer (\n input = gen_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n params = params,\n name = 'conv_1_gen',\n visualize = False )\n\n pool1_out_gen = max_pool_2d_layer ( input = conv1_out_gen, name = 'pool_1_gen')\n lrn1_out_gen = local_response_normalization_layer (pool1_out_gen, name = 'lrn_1_gen' ) \n process_params(params, name = self.name)\n c1_params = params\n\n\n\n\n\n # Conv Layer 2 - image\n conv2_out_image, params = conv_2d_layer (\n input = lrn1_out_img,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'conv_2_img' )\n\n pool2_out_img = max_pool_2d_layer ( input = conv2_out_image, name = 'pool_2_img')\n lrn2_out_img = local_response_normalization_layer (pool2_out_img, name = 'lrn_2_img' ) \n\n\n # Conv Layer 2 - gen\n conv2_out_gen, params = conv_2d_layer (\n input = lrn1_out_gen,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n params = params,\n name = 'conv_2_gen' )\n\n pool2_out_gen = max_pool_2d_layer ( input = conv2_out_gen, name = 'pool_2_gen')\n lrn2_out_gen = local_response_normalization_layer (pool2_out_gen, name = 'lrn_2_gen' ) \n process_params(params, name = self.name)\n c2_params = params\n\n # Dropout Layer\n flat_gen = flatten_layer(lrn2_out_gen)\n flat_img = flatten_layer(lrn2_out_img)\n\n flat_gen_dropout = dropout_layer ( input = flat_gen,\n prob = self.dropout_prob,\n name = 'dropout_1_gen') \n\n flat_img_dropout = dropout_layer ( input = flat_img,\n prob = self.dropout_prob,\n name = 'dropout_1_img') \n\n\n\n # Dot Product Layer 1 -img\n fc1_out_img, params = dot_product_layer ( input = flat_img_dropout,\n neurons = HIDDEN_1,\n name = 'image_disc_dot_1')\n # Dot Product Layer 1 - gen\n fc1_out_gen, params = dot_product_layer ( input = flat_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_1')\n\n process_params(params, name = self.name)\n d1_params = params\n \n ##\n fc1_out_gen_dropout = dropout_layer ( input = fc1_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_2_gen') \n fc1_out_img_dropout = dropout_layer ( input = fc1_out_img,\n prob = self.dropout_prob,\n name = 'dropout_2_img')\n\n # Dot Product Layer 2 -img\n fc2_out_img, params = dot_product_layer ( input = fc1_out_img_dropout,\n neurons = HIDDEN_2,\n name = 'image_disc_dot_2')\n # Dot Product Layer 2 - gen\n fc2_out_gen, params = dot_product_layer ( input = fc1_out_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_2')\n process_params(params, name = self.name)\n d2_params = params\n\n ##\n fc2_out_gen_dropout = dropout_layer ( input = fc2_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_3_gen') \n fc2_out_img_dropout = dropout_layer ( input = fc2_out_img,\n prob = self.dropout_prob,\n name = 'dropout_3_img')\n\n # Dot Product Layer 1 -img\n self.real, params = dot_product_layer ( input = fc2_out_img_dropout,\n neurons = 1,\n activation = 'sigmoid',\n name = 'real')\n # Dot Product Layer 1 -gen\n 
self.fake, params = dot_product_layer ( input = fc2_out_gen_dropout,\n params = params,\n neurons = 1,\n activation = 'sigmoid',\n name = 'fake')\n\n process_params(params, name = self.name)\n d3_params = params\n self.params = [c1_params, c2_params, d1_params, d2_params, d3_params] \n\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + 'discriminator_obj') as scope: \n # discriminator_obj = - 0.5 * tf.reduce_mean(log(self.real)) - \\\n # 0.5 * tf.reduce_mean(log(1-self.fake))\n discriminator_obj = 0.5 * tf.reduce_mean ((self.real-1)**2) + \\\n 0.5 * tf.reduce_mean ((self.fake)**2)\n tf.summary.scalar('discriminator_obj', discriminator_obj)\n tf.add_to_collection( self.name + '_objectives', discriminator_obj ) \n\n with tf.variable_scope (self.name + '_probabilites') as scope: \n tf.summary.scalar('fake_probability', tf.reduce_mean(self.fake))\n tf.summary.scalar('real_probability', tf.reduce_mean(self.real))\n \n self._cook_optimizer( \n lr = DIS_GAN_LR, \n optimizer = DIS_GAN_OPTIMIZER,\n l1_coeff = DIS_GAN_L1_COEFF,\n l2_coeff = DIS_GAN_WEIGHT_DECAY_COEFF)",
"def _init_predictor(self):\n self.conv_cls_prev = self._init_branch(\n conv_channels=self.cls_branch,\n conv_strides=(1, ) * len(self.cls_branch))\n self.conv_cls = nn.Conv2d(self.cls_branch[-1], self.cls_out_channels,\n 1)\n # init regression head\n self.conv_reg_prevs = nn.ModuleList()\n # init output head\n self.conv_regs = nn.ModuleList()\n # group_reg_dims:\n # ((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ), (1, ))\n for i in range(len(self.group_reg_dims)):\n reg_dims = self.group_reg_dims[i]\n reg_branch_channels = self.reg_branch[i]\n out_channel = self.out_channels[i]\n reg_list = nn.ModuleList()\n if len(reg_branch_channels) > 0:\n self.conv_reg_prevs.append(\n self._init_branch(\n conv_channels=reg_branch_channels,\n conv_strides=(1, ) * len(reg_branch_channels)))\n for reg_dim in reg_dims:\n reg_list.append(nn.Conv2d(out_channel, reg_dim, 1))\n self.conv_regs.append(reg_list)\n else:\n self.conv_reg_prevs.append(None)\n for reg_dim in reg_dims:\n reg_list.append(nn.Conv2d(self.feat_channels, reg_dim, 1))\n self.conv_regs.append(reg_list)",
"def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass",
"def build_discriminator():\n leakyrelu_alpha = 0.2\n momentum = 0.8\n input_shape = (256, 256, 3)\n\n input_layer = Input(shape=input_shape)\n\n # Add the first convolution block\n dis1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(input_layer)\n dis1 = LeakyReLU(alpha=leakyrelu_alpha)(dis1)\n\n # Add the 2nd convolution block\n dis2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(dis1)\n dis2 = LeakyReLU(alpha=leakyrelu_alpha)(dis2)\n dis2 = BatchNormalization(momentum=momentum)(dis2)\n\n # Add the third convolution block\n dis3 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(dis2)\n dis3 = LeakyReLU(alpha=leakyrelu_alpha)(dis3)\n dis3 = BatchNormalization(momentum=momentum)(dis3)\n\n # Add the fourth convolution block\n dis4 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same')(dis3)\n dis4 = LeakyReLU(alpha=leakyrelu_alpha)(dis4)\n dis4 = BatchNormalization(momentum=0.8)(dis4)\n\n # Add the fifth convolution block\n dis5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(dis4)\n dis5 = LeakyReLU(alpha=leakyrelu_alpha)(dis5)\n dis5 = BatchNormalization(momentum=momentum)(dis5)\n\n # Add the sixth convolution block\n dis6 = Conv2D(filters=256, kernel_size=3, strides=2, padding='same')(dis5)\n dis6 = LeakyReLU(alpha=leakyrelu_alpha)(dis6)\n dis6 = BatchNormalization(momentum=momentum)(dis6)\n\n # Add the seventh convolution block\n dis7 = Conv2D(filters=512, kernel_size=3, strides=1, padding='same')(dis6)\n dis7 = LeakyReLU(alpha=leakyrelu_alpha)(dis7)\n dis7 = BatchNormalization(momentum=momentum)(dis7)\n\n # Add the eight convolution block\n dis8 = Conv2D(filters=512, kernel_size=3, strides=2, padding='same')(dis7)\n dis8 = LeakyReLU(alpha=leakyrelu_alpha)(dis8)\n dis8 = BatchNormalization(momentum=momentum)(dis8)\n\n # Add a dense layer\n #avgd = keras.layers.AveragePooling2D(pool_size=(4,4) , strides = (4,4))(dis8)\n\n #flat = keras.layers.Flatten()(dis8)\n dis9 = Dense(units=1024)(dis8)\n dis9 = LeakyReLU(alpha=0.2)(dis9)\n\n # Last dense layer - for classification\n output = Dense(units=1, activation='sigmoid')(dis9)\n\n model = Model(inputs=[input_layer], outputs=[output], name='discriminator')\n return model",
"def build_discriminator2D(self, model_shape, filters=32, k_size=4, drop=False, rate=0.5, extra_conv=False, summary=False, ln=False, model_file=None, name='gan_d_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n \"\"\"\n Create a Discriminator Model using hyperparameters values defined as follows\n \"\"\"\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n\n input_shape = (n_rows, n_cols, c_dims) \n input_layer = Input(shape=input_shape, name=name+'input')\n\n d = self.Conv2D_Block(input_layer, filters, k_size=k_size, name=name+'1', bn=False) # 30x30x32\n d = self.Conv2D_Block(d, 2*filters, k_size=k_size, ln=ln, name=name+'2') # 15x15x64\n d = self.Conv2D_Block(d, 4*filters, k_size=k_size, ln=ln, name=name+'3') # 8x8x128\n if extra_conv:\n d = self.Conv2D_Block(d, 8*filters, strides=2, k_size=k_size, ln=ln, name=name+'4') # 8x8x256 \n d = self.Conv2D_Block(d, 16*filters, strides=1, k_size=k_size, ln=ln, name=name+'5') # 8x8x256\n else:\n d = self.Conv2D_Block(d, 8*filters, strides=1, k_size=k_size, ln=ln, name=name+'4')\n\n d = Flatten(name=name+'flatten')(d)\n if drop:\n d = Dropout(rate=rate, name=name+'dropout')(d, training=True)\n logits = Dense(1, activation='linear', kernel_initializer=RandomNormal(stddev=0.02), name=name+'dense')(d)\n out = Activation('sigmoid', name=name+'sigmoid')(logits)\n\n model = Model(inputs=[input_layer], outputs=[out, logits], name='Discriminator')\n if (summary):\n model.summary()\n return model",
"def _model_definition(self, net):\n \n # Input filtering and downsampling with max pooling\n print(net.shape) #channels must be specified first otherwise keras assumes channels last\n print('resnet17_scp')\n \n net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same', \n data_format=\"channels_first\", input_shape=(1, 100, 100))(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels \n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n\n\n \n return net"
] | [
"0.66957885",
"0.66077316",
"0.63622165",
"0.6342879",
"0.633063",
"0.6302211",
"0.6302211",
"0.62979877",
"0.6269781",
"0.62534744",
"0.623655",
"0.62052697",
"0.61675453",
"0.61575735",
"0.61488783",
"0.61291873",
"0.61201566",
"0.61011446",
"0.60958564",
"0.6083565",
"0.60602874",
"0.60557455",
"0.60557455",
"0.60513604",
"0.6042773",
"0.6036498",
"0.60193926",
"0.60167605",
"0.60156554",
"0.5968898"
] | 0.7864113 | 0 |
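The negative examples in the record above repeatedly use the same small helper — a `discriminator_block` that chains `Conv2d`, optional normalization, and `LeakyReLU` — and unpack it into an `nn.Sequential`. The sketch below is not part of the dataset; all names, channel widths, and the 64x64 input size are assumptions chosen for illustration. It shows one minimal way such blocks compose into a PatchGAN-style discriminator.

```python
import torch
import torch.nn as nn


def discriminator_block(in_filters, out_filters, normalize=True):
    """Conv -> (InstanceNorm) -> LeakyReLU downsampling block, as in the snippets above."""
    layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]
    if normalize:
        layers.append(nn.InstanceNorm2d(out_filters))
    layers.append(nn.LeakyReLU(0.2, inplace=True))
    return layers


class PatchDiscriminator(nn.Module):
    """Stacks four stride-2 blocks, then a 1-channel conv to produce a patch map."""

    def __init__(self, channels=3):
        super().__init__()
        self.model = nn.Sequential(
            *discriminator_block(channels, 64, normalize=False),
            *discriminator_block(64, 128),
            *discriminator_block(128, 256),
            *discriminator_block(256, 512),
            nn.Conv2d(512, 1, 3, padding=1),
        )

    def forward(self, img):
        return self.model(img)


if __name__ == "__main__":
    d = PatchDiscriminator(channels=3)
    out = d(torch.randn(1, 3, 64, 64))
    print(out.shape)  # torch.Size([1, 1, 4, 4]) after four stride-2 blocks
```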
Used to build the age branch of our face recognition network. This branch is composed of three Conv -> BN -> Pool -> Dropout blocks, followed by the Dense output layer. | def build_age_branch(self, inputs):
x = self.make_default_hidden_layers(inputs)
x = Flatten()(x)
x = Dense(128)(x)
x = Activation("relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(1)(x)
x = Activation("linear", name="age_output")(x)
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def __init__(self, kernel_size, filters, stage, block):\n super().__init__(name='identity' + str(stage) + block)\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n self.conv1 = layers.Conv2D(\n filters1, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')\n self.bn1 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')\n self.act1 = layers.Activation('relu')\n\n self.conv2 = layers.Conv2D(\n filters2,\n kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b')\n self.bn2 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')\n self.act2 = layers.Activation('relu')\n\n self.conv3 = layers.Conv2D(\n filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')\n self.bn3 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')\n\n self.add = layers.Add()\n self.act = layers.Activation('relu')",
"def construct_gumbel_selector(X_ph, num_words, embedding_dims, maxlen):\n emb_layer = Embedding(num_words, embedding_dims, input_length = maxlen, name = 'emb_gumbel')\n emb = emb_layer(X_ph) #(400, 50) \n net = Dropout(0.2, name = 'dropout_gumbel')(emb)\n net = emb\n first_layer = Conv1D(100, kernel_size, padding='same', activation='relu', strides=1, name = 'conv1_gumbel')(net) # bs, 400, 100\n\n # global info\n net_new = GlobalMaxPooling1D(name = 'new_global_max_pooling1d_1')(first_layer) # bs, 100\n global_info = Dense(100, name = 'new_dense_1', activation='relu')(net_new) # bs, 100\n\n # local info\n net = Conv1D(100, 3, padding='same', activation='relu', strides=1, name = 'conv2_gumbel')(first_layer) # bs, 400, 100\n local_info = Conv1D(100, 3, padding='same', activation='relu', strides=1, name = 'conv3_gumbel')(net) # bs, 400, 100\n combined = Concatenate()([global_info,local_info]) \n net = Dropout(0.2, name = 'new_dropout_2')(combined)\n net = Conv1D(100, 1, padding='same', activation='relu', strides=1, name = 'conv_last_gumbel')(net) \n\n logits_T = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'conv4_gumbel')(net) # bs, 400, 1\n # wanna make it bs, maxlen*num_groups\n squeeze_layer = Lambda(lambda x:tf.squeeze(x), output_shape=lambda x:x[:-1])\n\n logits_T_grp = Dense(maxlen*num_groups)(squeeze_layer(logits_T))\n #print(logits_T_grp.shape)\n return logits_T_grp # bs, 400* num_groups",
"def VGGFace(input_shape=(224, 224, 3), n_classes=10, include_top=True):\n # Create the Tensor\n input = Input(shape=input_shape)\n\n # Block 1\n # 1st Convolutional Layer\n x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='block1_conv1')(input)\n x = Activation('relu', name='block1_relu1')(x)\n\n # 2nd Convolutional Layer\n x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='block1_conv2')(x)\n x = Activation('relu', name='block1_relu2')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n # 3rd Convolutional Layer\n x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='block2_conv1')(x)\n x = Activation('relu', name='block2_relu1')(x)\n\n # 4th Convolutional Layer\n x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='block2_conv2')(x)\n x = Activation('relu', name='block2_relu2')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n # 5th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv1')(x)\n x = Activation('relu', name='block3_relu1')(x)\n\n # 6th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv2')(x)\n x = Activation('relu', name='block3_relu2')(x)\n\n # 7th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv3')(x)\n x = Activation('relu', name='block3_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n # 8th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv1')(x)\n x = Activation('relu', name='block4_relu1')(x)\n\n # 9th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv2')(x)\n x = Activation('relu', name='block4_relu2')(x)\n\n # 10th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv3')(x)\n x = Activation('relu', name='block4_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n # 11th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv1')(x)\n x = Activation('relu', name='block5_relu1')(x)\n\n # 12th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv2')(x)\n x = Activation('relu', name='block5_relu2')(x)\n\n # 13th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv3')(x)\n x = Activation('relu', name='block5_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block5_pool')(x)\n\n # Block 6\n # 14th Convulation Layer\n x = Conv2D(4096, (7, 7), strides=(1, 1), name='fc1_conv1')(x)\n x = Activation('relu', name='fc1_relu1')(x)\n x = Dropout(0.5)(x)\n\n # 15th Convulation Layer\n x = Conv2D(4096, (1, 1), strides=(1, 1), name='fc2_conv1')(x)\n x = Activation('relu', name='fc2_relu2')(x)\n x = Dropout(0.5, name='fc2_dropout')(x)\n\n # 16th Convulation Layer\n x = Conv2D(2622, (1, 1), strides=(1, 1), name='fc3_conv1')(x)\n x = Flatten(name='fc3_flatten')(x)\n\n if include_top:\n # Output Layer\n x = Activation('softmax', name='predictions_softmax')(x)\n\n # Create model\n model = keras.models.Model(input, x, name='vggface')\n return model",
"def LadderNet(input_size = (256, 256, 1), num_classes=2, filters=30): \n \n # X's denote standard flow\n # XNUM denote ResBlock outputs\n \n # \"First\" UNet\n \n # Input branch\n inputs = Input(input_size)\n X = Conv2D(filters=filters, kernel_size=3, activation=\"relu\", padding = 'same', kernel_initializer = 'he_normal')(inputs)\n\n # Down branch\n X1 = ResBlock(input_tensor=X, filters=filters) # ResBlock located in the first layer of the paper scheme\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X1) \n X = Activation(\"relu\")(X) # This ReLU is not shown in the paper scheme\n \n X2 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X2)\n X = Activation(\"relu\")(X)\n \n X3 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X3)\n X = Activation(\"relu\")(X)\n \n X4 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X4)\n X = Activation(\"relu\")(X)\n \n # Bottom block \n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X4])\n # X = Activation(\"relu\")(X) # This ReLU is commented in the paper code\n X5 = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X5)\n X = Add()([X, X3])\n # X = Activation(\"relu\")(X)\n X6 = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X6)\n X = Add()([X, X2])\n # X = Activation(\"relu\")(X)\n X7 = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, output_padding=1, kernel_initializer='he_normal')(X7)\n X = Add()([X, X1])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Top block (bottle-neck)\n X8 = ResBlock(input_tensor=X, filters=filters)\n X = ResBlock(input_tensor=X, filters=filters)\n X = Add()([X, X8])\n \n # \"Second\" UNet\n \n # Down branch\n X9 = ResBlock(input_tensor=X, filters=filters)\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X7, X]) \n \n X10 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X) \n X = Add()([X6, X])\n \n X11 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X5, X])\n\n X12 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n \n # Bottom block\n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X12]) \n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X11])\n # X = 
Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X10])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, kernel_initializer='he_normal', output_padding=1)(X)\n X = Add()([X, X9])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Final block\n X = Conv2D(filters=num_classes, kernel_size=1, kernel_initializer='he_normal')(X)\n # X = Activation(\"relu\")(X)\n X = Activation(\"softmax\")(X)\n #X = Conv2D(1, 1)(X)\n \n model = Model(inputs, X)\n \n \n return model",
"def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)",
"def predict_age(input_path:str):\r\n #Initialize dlib face detector using the facial landmark recognition\r\n detector, predictor = initialize_dlib(facial_landmark_predictor=FACIAL_LANDMARK_PREDICTOR)\r\n\r\n #Load age prediction model\r\n age_net = load_caffe_models(age_model=AGE_MODEL, age_proto = AGE_PROTO)\r\n\r\n # Initialize frame size\r\n frame_width = 640\r\n frame_height = 360\r\n\r\n # Read Input Image\r\n img = cv2.imread(input_path)\r\n\r\n # Take a copy of the initial image and resize it\r\n frame = img.copy()\r\n frame = imutils.resize(img, width=frame_width, height=frame_height)\r\n\r\n # Convert it to gray scale\r\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n # Detect faces in the gray scale frame\r\n faces = detector(gray_frame, 0)\r\n\r\n # Loop over the faces detected\r\n for idx, face in enumerate(faces):\r\n print(\"Detection Face ID = {} - Position = Left:{} Top:{} Right:{} Botton:{}\".format((idx+1), face.left(), face.top(), face.right(),\r\n face.bottom()))\r\n\r\n #Draw the face bounding box\r\n (x,y,w,h) = face_utils.rect_to_bb(face)\r\n startX , startY , endX , endY = x,y,(x+w),(y+h)\r\n face_img = cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)\r\n\r\n #Retrieve face\r\n # Determine the facial landmarks for the face region\r\n #shape = predictor(gray_frame, face)\r\n # Convert the facial landmark (x, y) coordinates to a NumPy array\r\n #shape = face_utils.shape_to_np(shape)\r\n # Extract the ROI of the face region as a separate image\r\n #(x, y, w, h) = cv2.boundingRect(np.array([shape]))\r\n #roi = img[y:y + h, x:x + w]\r\n #display_img(\"face\", roi)\r\n\r\n # image --> Input image to preprocess before passing it through our dnn for classification.\r\n blob = cv2.dnn.blobFromImage(image= face_img\r\n , scalefactor=1.0\r\n , size=(227, 227)\r\n , mean=MODEL_MEAN_VALUES\r\n , swapRB=False\r\n , crop=False)\r\n # Predict Age\r\n age_net.setInput(blob)\r\n age_preds = age_net.forward()\r\n i = age_preds[0].argmax()\r\n age = AGE_INTERVALS[i]\r\n age_confidence_score = age_preds[0][i]\r\n\r\n #print('shape' ,img.shape)\r\n\r\n #Draw the box\r\n label = \"Age{}-{:.2f}%\".format(age,age_confidence_score*100)\r\n print(label)\r\n\r\n #yPos = endY + 25\r\n yPos = startY - 15\r\n while yPos < 15:\r\n yPos += 15\r\n #print(yPos)\r\n optimal_font_scale = get_optimal_font_scale(label,((endX-startX)+25))\r\n cv2.rectangle(face_img, (startX, startY), (endX, endY), (0, 255, 0), 2)\r\n cv2.putText(face_img, label, (startX, yPos), cv2.FONT_HERSHEY_SIMPLEX, optimal_font_scale , (0, 255, 0), 2)\r\n #Display processed image\r\n display_img('Age Estimator', face_img)\r\n\r\n # Cleanup\r\n cv2.destroyAllWindows()",
"def darknet_body():\n return compose(DarknetConv2D_BN_Leaky(32, (3, 3)), MaxPooling2D(), DarknetConv2D_BN_Leaky(64, (3, 3)),\n MaxPooling2D(), bottleneck_block(128, 64), MaxPooling2D(), bottleneck_block(256, 128),\n MaxPooling2D(), bottleneck_x2_block(512, 256), MaxPooling2D(), bottleneck_x2_block(1024, 512))",
"def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = 
tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))",
"def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')",
"def build_unet(input_layer = Input((128,128,3)), start_depth=64, activation='relu', initializer='he_normal'):\n\n # 128 -> 64\n conv1 = Conv2D_BN(input_layer, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n conv1 = Conv2D_BN(conv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n pool1 = MaxPooling2D((2, 2))(conv1)\n\n # 64 -> 32\n conv2 = Conv2D_BN(pool1, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n conv2 = Conv2D_BN(conv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n pool2 = MaxPooling2D((2, 2))(conv2)\n\n # 32 -> 16\n conv3 = Conv2D_BN(pool2, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n conv3 = Conv2D_BN(conv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n pool3 = MaxPooling2D((2, 2))(conv3)\n\n # 16 -> 8\n conv4 = Conv2D_BN(pool3, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n conv4 = Conv2D_BN(conv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n pool4 = MaxPooling2D((2, 2))(conv4)\n\n # Middle\n convm=cbam_block(pool4)\n\n # 8 -> 16\n deconv4 = Conv2DTranspose(convm, start_depth * 8, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 16 -> 32\n deconv3 = Conv2DTranspose(uconv4, start_depth * 4, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 32 -> 64\n deconv2 = Conv2DTranspose(uconv3, start_depth * 2, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 64 -> 128\n deconv1 = Conv2DTranspose(uconv2, start_depth * 1, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n\n output_layer = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(uconv1)\n\n return output_layer",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def __conv_block(self, x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):\n\t\teps = 1.1e-5\n\t\tconv_name_base = \"conv\" + str(stage) + \"_\" + str(branch)\n\t\trelu_name_base = \"relu\" + str(stage) + \"_\" + str(branch)\n\n\t\t# 1x1 Convolution (Bottleneck layer)\n\t\tinter_channel = nb_filter * 4 \n\t\tx = BatchNormalization(epsilon=eps, axis=self.concat_axis, name=conv_name_base+\"_x1_bn\")(x)\n\t\tx = Scale(axis=self.concat_axis, name=conv_name_base+\"_x1_scale\")(x)\n\t\tx = Activation(\"relu\", name=relu_name_base+\"_x1\")(x)\n\t\tx = Conv2D(inter_channel, (1, 1), name=conv_name_base+\"_x1\", use_bias=False)(x)\n\n\t\tif dropout_rate:\n\t\t\tx = Dropout(dropout_rate)(x)\n\n\t\t# 3x3 Convolution\n\t\tx = BatchNormalization(epsilon=eps, axis=self.concat_axis, name=conv_name_base+\"_x2_bn\")(x)\n\t\tx = Scale(axis=self.concat_axis, name=conv_name_base+\"_x2_scale\")(x)\n\t\tx = Activation(\"relu\", name=relu_name_base+\"_x2\")(x)\n\t\tx = ZeroPadding2D((1, 1), name=conv_name_base+\"_x2_zeropadding\")(x)\n\t\tx = Conv2D(nb_filter, (3, 3), name=conv_name_base+\"_x2\", use_bias=False)(x)\n\n\t\tif dropout_rate:\n\t\t\tx = Dropout(dropout_rate)(x)\n\n\t\treturn x",
"def build_vgg(self):\n # Get the vgg network. Extract features from Block 5, last convolution.\n vgg = tf.keras.applications.VGG19(weights=\"imagenet\", input_shape=self.hr_shape, include_top=False)\n vgg.trainable = False\n for layer in vgg.layers:\n layer.trainable = False\n\n # Create model and compile\n model = tf.keras.models.Model(inputs=vgg.input, outputs=vgg.get_layer(\"block5_conv4\").output)\n\n return model",
"def build(self):\n\n # bgr_ = bgr*255.0\n bgr_= self.X\n start_time = time.time()\n print(\"build model started\")\n\n # blue ,green, red = tf.split(axis=3, num_or_size_splits=3, value= bgr)\n red ,green, blue, = tf.split(axis=3, num_or_size_splits=3, value= bgr_)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n # blue - VGG_MEAN[0],\n # green - VGG_MEAN[1],\n # red - VGG_MEAN[2],\n\n red - VGG_MEAN[0],\n green - VGG_MEAN[1],\n blue - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n\n\n print(bgr.shape)\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n\n\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n\n\n\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.fc9 = self.fc_layer(self.fc8,'fc9')\n # self.relu9 = tf.nn.relu(self.fc9)\n\n\n\n\n relu8 = tf.nn.relu(self.fc8)\n fc9 = self.fc_layer(relu8, 'fc9')\n print((\"build model finished: %ds\" % (time.time() - start_time)))\n return fc9\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")",
"def VGGModel(input_shape):\n \n\n X_input = Input(input_shape)\n \n # Creating a Neural Network (VGG-16)\n\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(X_input)\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(X)\n\n # Block 2\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(X)\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(X)\n\n # Block 3\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(X)\n\n # Block 4\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(X)\n\n # Block 5\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(X)\n \n X = Flatten()(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc')(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc2')(X)\n X = Dense(2048, activation='relu', kernel_initializer = 'he_normal', name='fc3')(X)\n X = Dense(1024, activation='relu', kernel_initializer = 'he_normal', name='fc4')(X)\n X = Dense(512, activation='relu', kernel_initializer = 'he_normal', name='fc5')(X)\n X = Dense(256, activation='relu', kernel_initializer = 'he_normal', name='fc6')(X)\n X = Dense(2, activation='linear', name='regression')(X)\n model = Model(inputs=X_input, outputs = X, name='HappyModel')\n print(model.summary())\n \n return model",
"def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = (2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n 
kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return outputs,inputs",
"def __init__(self, in_channels, BN, bn_eps=1e-5):\n super(InceptionE, self).__init__()\n self.branch1x1 = omth_blocks.conv_block(in_channels, filters=[320], kernel_sizes=[1], stride=[1],\n padding=[0], batch_norm=BN)\n\n self.branch3x3_1 = omth_blocks.conv_block(in_channels, filters=[384], kernel_sizes=[1], stride=[1],\n padding=[0], batch_norm=BN)\n self.branch3x3_2 = self.sub_inception_module(BN)\n\n self.branch3x3dbl_1 = omth_blocks.conv_block(in_channels, filters=[384, 384], kernel_sizes=[1, 3], stride=[1, 1],\n padding=[0, 1], batch_norm=BN)\n self.branch3x3dbl_2 = self.sub_inception_module(BN)\n\n self.branch_pool = omth_blocks.conv_block(in_channels, filters=[192], kernel_sizes=[1], stride=[1],\n padding=[0], batch_norm=BN)",
"def get_model(summary=False):\n\timage_input=Input(shape=(220,220,5),name='image_input')\n\tbranch1_conv1=Conv2D(64, kernel_size=(3, 3), border_mode='same', input_shape=(220,220,5), activation='relu')(image_input)\n\tbranch1_conv2=Conv2D(64, kernel_size=(1, 1), border_mode='same', activation='relu')(branch1_conv1)\t\n\tbranch1_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch1_conv1)\n\tbranch2_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch1_pool1)\n\tbranch2_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch2_conv1)\t\n\tbranch2_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch2_conv2)\n\tbranch3_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch2_pool1)\n\tbranch3_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch3_conv1)\t\n\tbranch3_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch3_conv2)\n\tbranch4_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch3_pool1)\n\tbranch4_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch4_conv1)\t\n\tbranch4_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch4_conv2)\n\tbranch5_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch4_pool1)\n\tbranch5_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch5_conv1)\t\n\tbranch5_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch5_conv2)\n\tbranch6_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch5_pool1)\n\tbranch6_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch6_conv1)\t\n\tbranch6_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch6_conv2)\n\tbranch1_flat=Flatten()(branch6_pool1)\n\tdrop=Dropout(.3)(branch1_flat)\n\t# FC layers group\n\tdense1=Dense(512, activation='relu', name='fc1')(drop)\n\tdrop1=Dropout(.3)(dense1)\n\tdense2=Dense(256, activation='relu', name='fc2')(drop1)\n\tdrop3=Dropout(.3)(dense2)\n\tout=Dense(2, activation='softmax', name='fc4')(drop3)\n\tmodel=Model(inputs=image_input,outputs=out)\n\treturn model",
"def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Rand, self).__init__()\r\n print(\"CIFAR VGG16_Rand is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n self.bias = True\r\n\r\n # Define the building blocks\r\n if layer <= 11:\r\n self.conv11 = CONV_3x3rand(3, 64, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 12:\r\n self.conv12 = nn.Sequential(CONV_3x3rand(64, 64, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 21:\r\n self.conv21 = CONV_3x3rand(64, 128, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 22:\r\n self.conv22 = nn.Sequential(CONV_3x3rand(128, 128, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 31:\r\n self.conv31 = CONV_3x3rand(128, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 32:\r\n self.conv32 = CONV_3x3rand(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 33:\r\n self.conv33 = nn.Sequential(CONV_3x3rand(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 41:\r\n self.conv41 = CONV_3x3rand(256, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv41 = CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 42:\r\n self.conv42 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 43:\r\n self.conv43 = nn.Sequential(CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 51:\r\n self.conv51 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 52:\r\n self.conv52 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 53:\r\n self.conv53 = nn.Sequential(CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = 
nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)",
"def __init__(self):\n super(AlexNet, self).__init__()\n\n initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=0.01)\n bias = tf.keras.initializers.Ones()\n bias0 = tf.keras.initializers.Zeros()\n self.drop = tf.keras.layers.Dropout(.5)\n\n # Input is 227 and not 224 as stated on the doc.\n # See issue: https://stackoverflow.com/questions/36733636/the-number-of-neurons-in-alexnet\n self.conv1_1 = tf.keras.layers.Conv2D(48, 11, strides=4, activation=\"relu\", input_shape=[227, 227, 3],\n kernel_initializer=initializer, bias_initializer=bias0)\n self.conv1_2 = tf.keras.layers.Conv2D(48, 11, strides=4, activation=\"relu\", input_shape=[227, 227, 3],\n kernel_initializer=initializer, bias_initializer=bias0)\n # Output: 227 - 11 / 4 + 1 = 55\n # Maxpool: 55 / 2 = 27.5 = ~27\n\n self.conv2_1 = tf.keras.layers.Conv2D(128, 5, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv2_2 = tf.keras.layers.Conv2D(128, 5, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 27\n # Maxpool: 27 / 2 = 13.5 = ~13\n\n self.conv3_1 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv3_2 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 13\n\n self.conv4_1 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv4_2 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 13\n\n self.conv5_1 = tf.keras.layers.Conv2D(128, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv5_2 = tf.keras.layers.Conv2D(128, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 13\n\n self.max_pool = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2))\n # Output: 13 / 2 = 6.5 = ~6\n\n self.flatten = tf.keras.layers.Flatten()\n\n # Input: 6 * 6 * 128 * 2 = 9216\n self.fc1_1 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n self.fc1_2 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n\n self.fc2_1 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n self.fc2_2 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n\n self.fc3 = tf.keras.layers.Dense(1000, activation=\"softmax\")",
"def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)",
"def discriminator_block(in_filters, out_filters, f_size=4, normalize=True,stride=2):\n layers = [nn.Conv2d(in_filters, out_filters, f_size, stride=stride, padding=0)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def _build_model(self, image_input_source, encoder_input_source, dropout_toggle):\n\t\t# We have to match this output size.\n\t\tbatch, input_height, input_width, input_depth = image_input_source.get_shape().as_list()\n\t\n\t\tfilter_sizes = [64, 64, 64] # Like VGG net, except made by a stupid person.\n\t\n\t\t# Convolutional ops will go here.\n\t\tc0, wc0, bc0 = self._build_conv(image_input_source, [3, 3, input_depth, filter_sizes[0]], [1, 1, 1, 1], activate=False)\n\t\tc1 = self._build_max_pool(c0, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tc2, wc2, bc2 = self._build_conv(self._build_dropout(c1, dropout_toggle), [3, 3, filter_sizes[0], filter_sizes[1]], [1, 1, 1, 1])\n\t\tc3 = self._build_max_pool(c2, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tc4, wc4, bc4 = self._build_conv(self._build_dropout(c3, dropout_toggle), [3, 3, filter_sizes[1], filter_sizes[2]], [1, 1, 1, 1])\n\t\tc5 = self._build_max_pool(c4, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tconv_output = c5\n\t\n\t\t# Transition to FC layers.\n\t\tpre_flat_shape = conv_output.get_shape().as_list()\n\t\tflatten = tf.reshape(conv_output, [-1, pre_flat_shape[1]*pre_flat_shape[2]*pre_flat_shape[3]])\n\t\n\t\t# Dense connections\n\t\tfc0, wf0, bf0 = self._build_fc(flatten, 512)\n\t\tfc1, wf1, bf1 = self._build_fc(fc0, 512)\n\t\tfc2, wf2, bf2 = self._build_fc(self._build_dropout(fc1, dropout_toggle), self.REPRESENTATION_SIZE)\n\t\tfc_out = fc2\n\t\n\t\t# Output point and our encoder mix-in.\n\t\tmu_output, wmu, bmu = self._build_fc(fc_out, self.REPRESENTATION_SIZE)\n\t\tz_output, wz, bz = self._build_fc(fc_out, self.REPRESENTATION_SIZE)\n\t\tencoded_output = tf.random_normal(mean=mu_output, stddev=z_output, shape=z_output.get_shape()) #tf.nn.softmax(fc_out)\n\t\tencoded_input = self._build_dropout(encoder_input_source + encoded_output, dropout_toggle) # Mix input and enc.\n\t\tencoded_input.set_shape(encoded_output.get_shape()) # Otherwise we can't ascertain the size.\n\t\n\t\t# More dense connections on the offset.\n\t\tdfc2, dwf2, dbf2 = self._build_fc(encoded_input, 512, weight=tf.transpose(wf2), bias=tf.transpose(bf1))\n\t\tdfc1, dwf1, dbf1 = self._build_fc(dfc2, 512, weight=tf.transpose(wf1), bias=tf.transpose(bf0))\n\t\tdfc0, dwf0, dbf0 = self._build_fc(self._build_dropout(dfc1, dropout_toggle), flatten.get_shape().as_list()[-1], weight=tf.transpose(wf0))\n\t\n\t\t# Expand for more convolutional operations.\n\t\tunflatten = tf.reshape(dfc0, [-1, pre_flat_shape[1], pre_flat_shape[2], pre_flat_shape[3]]) #pre_flat_shape)\n\t\n\t\t# More convolutions here.\n\t\tdc5 = self._build_unpool(unflatten, [1, 2, 2, 1])\n\t\tdc4, wdc4, bdc4 = self._build_deconv(self._build_dropout(dc5, dropout_toggle), c3.get_shape().as_list(), [3, 3, filter_sizes[1], filter_sizes[2]], [1, 1, 1, 1])\n\t\tdc3 = self._build_unpool(dc4, [1, 2, 2, 1])\n\t\tdc2, wdc2, bdc2 = self._build_deconv(self._build_dropout(dc3, dropout_toggle), c1.get_shape().as_list(), [3, 3, filter_sizes[0], filter_sizes[1]], [1, 1, 1, 1])\n\t\tdc1 = self._build_unpool(dc2, [1, 2, 2, 1])\n\t\tdc0, wdc0, bdc0 = self._build_deconv(dc1, [batch, input_height, input_width, input_depth], [3, 3, input_depth, filter_sizes[0]], [1, 1, 1, 1], activate=False)\n\t\tdeconv_output = dc0\n\t\n\t\t# Return result + encoder output\n\t\treturn deconv_output, encoded_output",
"def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block",
"def __init__(self, in_channels, out_channels, kernel_size, num_layers=3,\n dropout_rate=0.3):\n super().__init__()\n\n _validate_args(in_channels, out_channels, kernel_size, num_layers)\n\n self.kernel_size = kernel_size\n self.padding = (kernel_size - 1) // 2\n\n conv_list = [\n self._create_single_block(in_channels, out_channels)\n ]\n\n # Create the rest of the blocks\n for _ in range(num_layers - 1):\n conv_list.append(\n self._create_single_block(out_channels, out_channels)\n )\n\n self.convs = nn.Sequential(*conv_list)\n self.drop = nn.Dropout2d(dropout_rate)\n self.pool = nn.MaxPool2d((2, 2), return_indices=True)",
"def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = (height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model",
"def dir_conv_block(model, nb_layers, nb_filters, rate):\n\n for _ in range(nb_layers):\n model, rate = dir_conv_layer(model, nb_filters, rate)\n model = MaxPooling1D()(model)\n model = Dropout(0.1)(model)\n return model, rate",
"def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers"
] | [
"0.6322069",
"0.62509495",
"0.6210222",
"0.6205929",
"0.61850744",
"0.6165662",
"0.61332285",
"0.6119399",
"0.6096222",
"0.60777825",
"0.6053835",
"0.60508585",
"0.60431457",
"0.6030179",
"0.60178214",
"0.6007941",
"0.5995195",
"0.5962688",
"0.59607214",
"0.59575105",
"0.59514",
"0.59498066",
"0.5914623",
"0.59121513",
"0.590779",
"0.59007925",
"0.5893799",
"0.58919287",
"0.5886318",
"0.58854264"
] | 0.7157092 | 0 |
raise WinproxyError if result is 0 | def fail_on_zero(func_name, result, func, args):
if not result:
raise WinproxyError(func_name)
return args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def winhttp_WinHttpFreeProxyResult(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pProxyResult\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def winhttp_WinHttpGetProxyResult(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hResolver\", \"pProxyResult\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def ErrCheckBool(result, func, args):\n if not result:\n raise WinError()\n return args",
"def ErrCheckHandle(result, func, args):\n if not result:\n raise WinError()\n return AutoHANDLE(result)",
"def error_if_null_return(retval: Any, func: Callable, args: Tuple[Any]):\n if not retval:\n raise WinError()\n return retval",
"def _check_return(self, name, ret_code):\n if ret_code == 0:\n pass\n else:\n raise RuntimeError('An error occured setting %s: %d' % (name, ret_code))",
"def retry(self):\n # XXX: check whether it is possible to distingish \n # between the error conditions and set meaningfull exitcode\n return False",
"def _check_error(return_value):\n if return_value < 0:\n raise IOError(pm.lib.Pm_GetErrorText(return_value))",
"def check_proxy(q):\n\n\n if not q.empty():\n\n proxy = q.get(False)\n proxy = proxy.strip()\n\n\n try:\n \n is_working = False\n\n if not is_bad_proxy(proxy):\n proxys_working_list.update({proxy: proxy})\n\n \n\n print(bcolors.OKGREEN + \" --[+] \", proxy, \" | PASS\" + bcolors.ENDC)\n\n else:\n print(\" --[!] \", proxy, \" | FAILED\")\n \n \n\n except Exception as err:\n print(\" --[!] \", proxy, \" | FAILED | \" + str(err))",
"def check_result(ec):\r\n # NOTE: This will break some oscilloscopes that are powered by USB.\r\n # Some of the newer scopes, can actually be powered by USB and will\r\n # return a useful value. That should be given back to the user.\r\n # I guess we can deal with these edge cases in the functions themselves\r\n if ec == 0:\r\n return\r\n\r\n else:\r\n ecName = error_num_to_name(ec)\r\n ecDesc = error_num_to_desc(ec)\r\n raise IOError('Error calling %s: %s (%s)' % (\r\n str(inspect.stack()[1][3]), ecName, ecDesc))",
"def _tunnel_success(tunnel_returncode):\n return tunnel_returncode < 0",
"def proxy_check(self, proxy):",
"def CHK(err):\n if err < 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(err,ctypes.byref(buf),buf_size)\n raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))",
"def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))",
"def winhttp_WinHttpGetProxyForUrlEx(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hResolver\", \"pcwszUrl\", \"pAutoProxyOptions\", \"pContext\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def checkResult(self, errorCode):\n # NOTE: This will break some oscilloscopes that are powered by USB.\n # Some of the newer scopes, can actually be powered by USB and will\n # return a useful value. That should be given back to the user.\n # I guess we can deal with these edge cases in the functions themselves\n if errorCode == 0:\n return\n\n else:\n ecName = self.errorNumToName(errorCode)\n ecDesc = self.errorNumToDesc(errorCode)\n raise IOError('Error calling %s: %s (%s)' % (\n str(inspect.stack()[1][3]), ecName, ecDesc))",
"def _checknet():\n exit_code = os.system('ping -c 1 www.baidu.com 1>/dev/null 2>&1')\n return exit_code",
"def winhttp_WinHttpOpen(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pwszUserAgent\", \"dwAccessType\", \"pwszProxyName\", \"pwszProxyBypass\", \"dwFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def is_bad_proxy(pip, url):\n try:\n res = requests.get(\n url,\n proxies={'http':pip},\n headers={'User-agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'},\n timeout=10\n )\n except Exception as e:\n return 1\n if res.status_code == 200:\n return 0\n\n print(res.status_code)\n return 1",
"def proxy_error(response):\n r = HttpResponse(\n response.content,\n content_type=response.headers[\"content-type\"],\n status=response.status_code,\n )\n r.setdefault(\"X-PROMGEN-PROXY\", response.url)\n return r",
"def magmaCheckStatus(status):\n\n if status != 0:\n raise MagmaError(status)",
"def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())",
"def systcpconnfail(self) :\n\t\ttry :\n\t\t\treturn self._systcpconnfail\n\t\texcept Exception as e:\n\t\t\traise e",
"def check_proxy_status(proxy_ip):\n try:\n status = subprocess.check_output([\"ping\", \"-c\",\"1\", proxy_ip]).decode('utf-8')\n if status.find(\"1 received\") > -1:\n return True\n except subprocess.CalledProcessError as e:\n return False\n\n return False",
"def ConnectByNameError(self) -> _n_0_t_14:",
"def check_exit_code(results):\n assert results[\"metrics\"][\"Exit code\"] == 0",
"def clientconnfail(self) :\n\t\ttry :\n\t\t\treturn self._clientconnfail\n\t\texcept Exception as e:\n\t\t\traise e",
"def connectFailed(self, reason):\n\t\tself._tunnelReadyDeferred.errback(reason)",
"def execute_failure(self, *args, **kwargs):\n return 1, \"\", None",
"def check_success(odbc_obj, ret):\n if ret not in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO, SQL_NO_DATA, SQL_NULL_DATA):\n if isinstance(odbc_obj, Cursor):\n ctrl_err(SQL_HANDLE_STMT, odbc_obj.stmt_h, odbc_obj.ansi)\n elif isinstance(odbc_obj, Connection):\n ctrl_err(SQL_HANDLE_DBC, odbc_obj.dbc_h, odbc_obj.ansi)\n else:\n ctrl_err(SQL_HANDLE_ENV, odbc_obj, False)"
] | [
"0.68406665",
"0.63366336",
"0.6020998",
"0.6016139",
"0.58826256",
"0.5877293",
"0.5857276",
"0.582949",
"0.57794017",
"0.5773708",
"0.57534397",
"0.56958175",
"0.5652606",
"0.5602097",
"0.5585624",
"0.5577021",
"0.55100924",
"0.5496161",
"0.54908186",
"0.5478192",
"0.54240996",
"0.54150003",
"0.5410886",
"0.5403818",
"0.5382604",
"0.5370788",
"0.5363099",
"0.5312704",
"0.5312337",
"0.52970254"
] | 0.721989 | 0 |
LciaEngine.__getitem__ retrieves a canonical context by more intensively searching for matches from a given context. Adds foreign context's full name as synonym if one is affirmatively found. If one is not found, returns the NullContext. None is returned as None, to represent 'unspecified' (i.e. accept all) as opposed to 'no context' which is a context (accept only matching). (as tested) | def __getitem__(self, item):
if item is None:
return None
try:
return self._cm.__getitem__(item)
except KeyError:
if isinstance(item, Context):
return self._cm.find_matching_context(item)
elif isinstance(item, tuple) and len(item) > 1:
return self.__getitem__(item[1:])
return NullContext | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __dis_context__(self, context, word):\n senses = self.vs.get_senses(word, self.ignore_case)\n if self.verbose:\n print(\"Senses of a target word:\")\n print(senses)\n\n if len(senses) == 0: # means we don't know any sense for this word\n return None\n\n # collect context vectors\n vctx = [self.vc[c] for c in context]\n\n if len(vctx) == 0: # means we have no context\n return None\n # TODO: better return most frequent sense or make random choice\n\n # filter context vectors, if aplicable\n if self.filter_ctx >= 0:\n vctx = self.__filter__(vctx, senses, self.filter_ctx)\n\n if self.ctx_method == 'prob':\n avg_context = np.mean(vctx, axis=0)\n scores = [self.__logprob__(avg_context, self.vs[sense]) for sense, prob in senses]\n\n elif self.ctx_method == 'sim':\n avg_context = np.mean(vctx, axis=0)\n scores = [self.__cosine_sim__(avg_context, self.vs[sense]) for sense, prob in senses]\n if self.verbose:\n print(\"Sense probabilities:\")\n print(scores)\n\n else:\n raise ValueError(\"Unknown context handling method '%s'\" % self.ctx_method)\n\n # return sense (word#id), scores for senses\n return senses[np.argmax(scores)][0], scores",
"def _plug_from_context(self):\n children = self.smartplug.sys_info[\"children\"]\n return next(c for c in children if c[\"id\"] == self.smartplug.context)",
"def get_context(self):\n symbol_chain = self.split_completion_object(self.get_word_before())\n current_rs_object = self.module\n\n for symbol in symbol_chain:\n try:\n current_rs_object = current_rs_object.get_object(symbol)\n logging.info(f\"New context found: {current_rs_object.name}\")\n except:\n logging.error(f\"{type(current_rs_object)} has no method get_object yet.\")\n return current_rs_object\n\n return current_rs_object",
"def _resolve_context(r, tablename, context):\n\n record_id = r.id\n if not record_id:\n return None\n\n if not context:\n query = None\n\n elif type(context) is tuple:\n context, field = context\n query = (FS(context) == r.record[field])\n\n elif context == \"location\":\n # Show records linked to this Location & all it's Child Locations\n s = \"(location)$path\"\n # This version doesn't serialize_url\n #m = (\"%(id)s/*,*/%(id)s/*\" % dict(id=id)).split(\",\")\n #filter = (FS(s).like(m)) | (FS(s) == id)\n m = (\"%(id)s,%(id)s/*,*/%(id)s/*,*/%(id)s\" % dict(id=record_id)).split(\",\")\n m = [f.replace(\"*\", \"%\") for f in m]\n query = (FS(s).like(m))\n # @ToDo:\n #elif context == \"organisation\":\n # # Show records linked to this Organisation and all it's Branches\n # s = \"(%s)\" % context\n # query = (FS(s) == id)\n else:\n # Normal: show just records linked directly to this master resource\n s = \"(%s)\" % context\n query = (FS(s) == record_id)\n\n # Define target resource\n resource = current.s3db.resource(tablename, filter=query)\n r.customise_resource(tablename)\n return resource, query",
"def _repair_context(self, context):\n\n if isinstance(context, Graph):\n return self._repair_context(context.identifier)\n elif isinstance(context, URIRef):\n return context\n else:\n return NULL_CONTEXT",
"def _find_context(\n node: ast.AST,\n contexts: Tuple[Type[ast.AST], ...],\n) -> Optional[ast.AST]:\n parent = get_parent(node)\n if parent is None:\n return None\n elif isinstance(parent, contexts):\n return parent\n return _find_context(parent, contexts)",
"def set_context(self):\n results = self.rml.query(\"\"\"\n SELECT ?o {\n {\n ?s rr:class ?o\n } UNION {\n ?s rr:predicate ?o\n }\n }\"\"\")\n namespaces = [Uri(row[0]).value[0]\n for row in results\n if isinstance(row[0], rdflib.URIRef)]\n self.context = {ns[0]: ns[1] for ns in namespaces if ns[0]}",
"def __getitem__(self, name):\r\n if name not in self.current_context:\r\n raise KeyError(\"Trying to use an undefined label!\")\r\n return self.current_context[name]",
"def test_context2vec_context_similarity(self):\n contexts = {1:{0, 1, 2, 3}, 2:{2, 3, 4, 5}, 3:{10}}\n cc = ContextCorpus(contexts)\n model = Word2Vec(cc, size=3, iter=1000, min_count=1, alpha=0.1, sg=0)\n results = filter(lambda x: x[0].startswith('C'), model.most_similar('C1',\n topn=100))\n self.assertTrue(results[0][0] == 'C2')",
"def get_from_context(cls, context):\n obj = context.active_object\n\n if obj and obj.type not in {\"LAMP\", \"CAMERA\"}:\n mat = obj.active_material\n\n if mat:\n # ID pointer\n node_tree = mat.appleseed.osl_node_tree\n\n if node_tree:\n return node_tree, mat, mat\n\n elif obj and obj.type == \"LAMP\":\n node_tree = obj.data.appleseed.osl_node_tree\n\n if node_tree:\n return node_tree, None, None\n\n return None, None, None",
"def resolve(self, context, safe=True, quiet=False):\r\n try:\r\n current = context\r\n for bit in self.bits:\r\n try: # dictionary lookup\r\n current = current[bit]\r\n except (TypeError, AttributeError, KeyError):\r\n try: # attribute lookup\r\n current = getattr(current, bit)\r\n except (TypeError, AttributeError):\r\n try: # list-index lookup\r\n current = current[int(bit)]\r\n except (IndexError, # list index out of range\r\n ValueError, # invalid literal for int()\r\n KeyError, # dict without `int(bit)` key\r\n TypeError, # unsubscriptable object\r\n ):\r\n raise ValueError('Failed lookup for key [%s] in %r'\r\n ', when resolving the accessor %s'\r\n % (bit, current, self))\r\n if callable(current):\r\n if safe and getattr(current, 'alters_data', False):\r\n raise ValueError('refusing to call %s() because `.alters_data = True`'\r\n % repr(current))\r\n current = current()\r\n # important that we break in None case, or a relationship\r\n # spanning across a null-key will raise an exception in the\r\n # next iteration, instead of defaulting.\r\n if current is None:\r\n break\r\n return current\r\n except:\r\n if not quiet:\r\n raise",
"def __call__(self, context=None):\n if context is None:\n context = self.context\n return self.entity.resolve(context)",
"def get(self, key):\n keystr = str(key)\n res = None\n\n try:\n res = self.ctx[keystr]\n except KeyError:\n for k, v in self.ctx.items():\n if \"name\" in v and v[\"name\"].lower() == keystr.lower():\n res = v\n break\n\n return res",
"def xontrib_context(name):\n spec = find_xontrib(name)\n if spec is None:\n return None\n m = importlib.import_module(spec.name)\n pubnames = getattr(m, \"__all__\", None)\n if pubnames is not None:\n ctx = {k: getattr(m, k) for k in pubnames}\n else:\n ctx = {k: getattr(m, k) for k in dir(m) if not k.startswith(\"_\")}\n return ctx",
"def common_contexts(self, words, fail_on_unknown=False):\n words = [self._key(w) for w in words]\n contexts = [set(self._word_to_contexts[w]) for w in words]\n empty = [words[i] for i in range(len(words)) if not contexts[i]]\n common = reduce(set.intersection, contexts)\n if empty and fail_on_unknown:\n raise ValueError(\"The following word(s) were not found:\", \" \".join(words))\n elif not common:\n # nothing in common -- just return an empty freqdist.\n return FreqDist()\n else:\n fd = FreqDist(\n c for w in words for c in self._word_to_contexts[w] if c in common\n )\n return fd",
"def currentCtx(*args, **kwargs)->AnyStr:\n pass",
"def ctx():\n return None",
"def open_context(self, description, shared=True):\n\n cu_name = description[\"cu_name\"]\n context = self.contexts.get(cu_name)\n if context:\n return context[\"idx\"]\n if _xrt_version >= (2, 6, 0):\n cu_index = xrt.xclIPName2Index(self.handle, cu_name)\n description[\"cu_index\"] = cu_index\n else:\n cu_index = description[\"cu_index\"]\n\n uuid = bytes.fromhex(description[\"xclbin_uuid\"])\n uuid_ctypes = XrtUUID((ctypes.c_char * 16).from_buffer_copy(uuid))\n err = xrt.xclOpenContext(self.handle, uuid_ctypes, cu_index, shared)\n\n if err:\n raise RuntimeError(\n \"Could not open CU context - {}, {}\".format(err, cu_index)\n )\n # Setup the execution context for the compute unit\n self.contexts[cu_name] = {\n \"cu\": cu_name,\n \"idx\": cu_index,\n \"uuid_ctypes\": uuid_ctypes,\n \"shared\": shared,\n }\n\n return cu_index",
"def __getitem__(self, key):\n try:\n att = getattr(self, key)\n return att\n except AttributeError:\n # mimic pylons context\n return None",
"def context(self) -> CONTEXT:",
"def get_context(event):\n for link in event.links.links:\n if link.get(\"type\") == \"CONTEXT\":\n context = link.get(\"target\")\n break\n else:\n context = None\n return context",
"def lookup():",
"def getQueryAnchor(self, context, metatype=None):\n for o in list(context.aq_chain):\n if IExtropyTracking.providedBy(o):\n if metatype is None:\n return o\n elif hasattr(o,'meta_type') and metatype == o.meta_type:\n return o\n return getToolByName(self, 'portal_url').getPortalObject()",
"def __getitem__(self, item):\r\n current = self\r\n while current is not None:\r\n if item in current.locals:\r\n return current.locals[item]\r\n current = current.parent",
"def __contexts(\n self, triple: \"_TripleType\"\n ) -> Generator[\"_ContextType\", None, None]:\n # type error: Argument 2 to \"get\" of \"Mapping\" has incompatible type \"str\"; expected \"Optional[Graph]\"\n return (\n self.__context_obj_map.get(ctx_str, ctx_str) # type: ignore[arg-type]\n for ctx_str in self.__get_context_for_triple(triple, skipQuoted=True)\n if ctx_str is not None\n )",
"def resolveContext(self, context):\n if context is None:\n return context\n elif isinstance(context, tuple):\n return context\n elif isinstance(context, tuple):\n return tuple(context.split('/'))\n else:\n return context.getPhysicalPath()",
"def get_word_context(word):\r\n\tfor content, profile in word_context_profile:\r\n\t\tif word == content:\r\n\t\t\treturn profile \r\n\treturn 0",
"def get_doc_context(self, docname, body, metatags):\n\n # TYPO3: remove 'documentation' from end of 'shorttitle'\n shorttitle = self.globalcontext.get('shorttitle', '')\n if shorttitle and shorttitle.endswith(' documentation'):\n shorttitle = shorttitle[0:-14].rstrip()\n self.globalcontext['shorttitle'] = shorttitle\n \n\n # find out relations\n # TYPO3: always have order 'previous', 'up', 'next'\n prev = up = next = None\n parents = []\n rellinks = self.globalcontext['rellinks'][:]\n related = self.relations.get(docname)\n titles = self.env.titles\n if related and related[1]:\n try:\n prev = {\n 'link': self.get_relative_uri(docname, related[1]),\n 'title': self.render_partial(titles[related[1]])['title']\n }\n rellinks.append((related[1], prev['title'], 'P', _('previous')))\n except KeyError:\n # the relation is (somehow) not in the TOC tree, handle\n # that gracefully\n prev = None\n if related and related[0]:\n try:\n up = {\n 'link': self.get_relative_uri(docname, related[0]),\n 'title': self.render_partial(titles[related[0]])['title']\n }\n rellinks.append((related[0], up['title'], 'U', _('up')))\n except KeyError:\n # the relation is (somehow) not in the TOC tree, handle\n # that gracefully\n prev = None\n if related and related[2]:\n try:\n next = {\n 'link': self.get_relative_uri(docname, related[2]),\n 'title': self.render_partial(titles[related[2]])['title']\n }\n rellinks.append((related[2], next['title'], 'N', _('next')))\n except KeyError:\n next = None\n while related and related[0]:\n try:\n parents.append(\n {'link': self.get_relative_uri(docname, related[0]),\n 'title': self.render_partial(titles[related[0]])['title']})\n except KeyError:\n pass\n related = self.relations.get(related[0])\n if parents:\n parents.pop() # remove link to the master file; we have a generic\n # \"back to index\" link already\n parents.reverse()\n\n # title rendered as HTML\n title = self.env.longtitles.get(docname)\n title = title and self.render_partial(title)['title'] or ''\n # the name for the copied source\n sourcename = self.config.html_copy_source and docname + '.txt' or ''\n\n # metadata for the document\n meta = self.env.metadata.get(docname)\n\n # local TOC and global TOC tree\n self_toc = self.env.get_toc_for(docname, self)\n toc = self.render_partial(self_toc)['fragment']\n\n return dict(\n parents = parents,\n prev = prev,\n next = next,\n title = title,\n meta = meta,\n body = body,\n metatags = metatags,\n rellinks = rellinks,\n sourcename = sourcename,\n toc = toc,\n # only display a TOC if there's more than one item to show\n display_toc = (self.env.toc_num_entries[docname] > 1),\n )",
"def foreign_get(object, item):\n if object is None or object is Null:\n return None\n try:\n if isinstance(object, (list, tuple)):\n item = int(item)\n return foreign_translate(object[item])\n except (KeyError, IndexError):\n return None",
"def context(self, tokens, index, history):\n return tokens[index - 1] if index else None"
] | [
"0.57803524",
"0.52267176",
"0.5220787",
"0.52120763",
"0.5174642",
"0.5164446",
"0.5149356",
"0.5041184",
"0.49859065",
"0.49716657",
"0.49243554",
"0.49187955",
"0.49112827",
"0.4855967",
"0.48273122",
"0.4817933",
"0.47763702",
"0.47615644",
"0.4757728",
"0.47567716",
"0.4750502",
"0.4734812",
"0.47305727",
"0.47237477",
"0.47216007",
"0.46912032",
"0.46878093",
"0.46591023",
"0.46567845",
"0.46564373"
] | 0.6447261 | 0 |
Given a quantity, import its CFs into the local database. Unfortunately this is still going to be slow because every part of the CF still needs to be canonicalized. The only thing that's saved is creating a new Characterization instance. | def import_cfs(self, quantity):
try:
qq = self._canonical_q(quantity)
except KeyError:
qq = self.add_quantity(quantity)
count = 0
for cf in quantity.factors():
count += 1
# print(cf)
try:
fb = self._fm[cf.flowable]
except KeyError:
fb = self._create_flowable(*quantity.query_synonyms(cf.flowable))
self.add_quantity(cf.ref_quantity) # this may lead to the creation of non-converting quantities if units mismatch
cx = self[cf.context]
self._qassign(qq, fb, cf, context=cx)
self._factors_for_later[quantity] = True
print('Imported %d factors for %s' % (count, quantity)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_counties():\n\n query = 'INSERT INTO texas_counties(county, region) VALUES(%s,%s)'\n with persistence() as db:\n # create new cursor instance\n cursor = db.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n for council, counties in COUNCIL_DATA.items():\n for county in counties:\n cursor.execute(query, (county, council))\n db.commit()",
"def import_in_db(self):\n\n super().new_entry()\n\n if Categories.objects.all().count() > 0:\n try:\n category_compare = Categories.objects.get(\n name=self.product_infos['compare_to']\n )\n except:\n category_compare = None\n else:\n category_compare = None\n\n product_object = Products.objects.create(\n code=self.code,\n name=self.name,\n url=self.product_infos['product_url'],\n quantity=self.product_infos['quantity'],\n country=self.product_infos['countries'],\n ingredients=self.product_infos['ingredients'],\n energy=self.product_infos['energy-kcal_100g'],\n fat=self.product_infos['fat_100g'],\n satured_fat=self.product_infos['saturated-fat_100g'],\n carbohydrates=self.product_infos['carbohydrates_100g'],\n sugar=self.product_infos['sugars_100g'],\n fibers=self.product_infos['fiber_100g'],\n proteins=self.product_infos['proteins_100g'],\n salt=self.product_infos['salt_100g'],\n sodium=self.product_infos['sodium_100g'],\n nutriscore=self.product_infos['nutriscore'],\n image_url=self.product_infos['image_url'],\n compare_to_category=category_compare\n )\n\n ProductImportation.count += 1\n\n return product_object",
"def import_and_clean():\n \n with open(\"inventory.csv\", newline=\"\") as csvfile:\n inventory = csv.DictReader(csvfile)\n rows = list(inventory)\n\n for row in rows:\n row[\"product_price\"] = row[\"product_price\"].replace(\"$\", \"\")\n row[\"product_price\"] = row[\"product_price\"].replace(\".\", \"\")\n row[\"product_price\"] = int(float(row[\"product_price\"]))\n row[\"date_updated\"] = datetime.datetime.strptime(row[\"date_updated\"], \"%m/%d/%Y\")\n row[\"product_quantity\"]= int(row[\"product_quantity\"])\n \n return rows",
"def add_from_file(category_label, fname):\n dirname = os.path.dirname(os.path.abspath(__file__))\n with codecs.open(os.path.join(dirname, fname), encoding='utf-8') as stops_file:\n for line in stops_file:\n if line.startswith('#'):\n continue\n val_name, val_surface_forms = preprocess_cl_line(line)\n for form in val_surface_forms:\n db_add(category_label, val_name, form)",
"def load_chems(self, file_name, update_mappings, chunksize=1000):\n\n logger.info( \"Loading chemicals from [{}]\".format(file_name) )\n\n csv.field_size_limit(10000000)\n input_file = codecs.open(file_name, 'rb', 'utf-8')\n tsvin = csv.reader(input_file, delimiter='\\t')\n\n sql_alc_conn = self.db.connect()\n db_api_conn = sql_alc_conn.connection\n\n chem_ins = DBBatcher(db_api_conn, 'insert into schembl_chemical (id, mol_weight, logp, med_chem_alert, is_relevant, donor_count, acceptor_count, ring_count, rot_bond_count, corpus_count) values (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10)')\n chem_struc_ins = DBBatcher(db_api_conn, 'insert into schembl_chemical_structure (schembl_chem_id, smiles, std_inchi, std_inchikey) values (:1, :2, :3, :4)', self.chem_struc_types)\n chem_map_del = DBBatcher(db_api_conn, 'delete from schembl_document_chemistry where schembl_doc_id = :1 and schembl_chem_id = :2 and field = :3 and (:4 > -1)')\n chem_map_ins = DBBatcher(db_api_conn, 'insert into schembl_document_chemistry (schembl_doc_id, schembl_chem_id, field, frequency) values (:1, :2, :3, :4)')\n if (\"cx_oracle\" in str(self.db.dialect)):\n chem_ins = DBBatcher(db_api_conn, 'insert into schembl_chemical (id, mol_weight, logp, med_chem_alert, is_relevant, donor_count, acceptor_count, ring_count, rot_bond_count, corpus_count) values (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10)')\n chem_struc_ins = DBBatcher(db_api_conn, 'insert into schembl_chemical_structure (schembl_chem_id, smiles, std_inchi, std_inchikey) values (:1, :2, :3, :4)', self.chem_struc_types)\n chem_map_del = DBBatcher(db_api_conn, 'delete from schembl_document_chemistry where schembl_doc_id = :1 and schembl_chem_id = :2 and field = :3 and (:4 > -1)')\n chem_map_ins = DBBatcher(db_api_conn, 'insert into schembl_document_chemistry (schembl_doc_id, schembl_chem_id, field, frequency) values (:1, :2, :3, :4)')\n else:\n chem_ins = DBBatcher(db_api_conn, 'insert into schembl_chemical (id, mol_weight, logp, med_chem_alert, is_relevant, donor_count, acceptor_count, ring_count, rot_bond_count, corpus_count) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')\n chem_struc_ins = DBBatcher(db_api_conn, 'insert into schembl_chemical_structure (schembl_chem_id, smiles, std_inchi, std_inchikey) values (%s, %s, %s, %s)', self.chem_struc_types)\n chem_map_del = DBBatcher(db_api_conn, 'delete from schembl_document_chemistry where schembl_doc_id = %s and schembl_chem_id = %s and field = %s and (%s > -1)')\n chem_map_ins = DBBatcher(db_api_conn, 'insert into schembl_document_chemistry (schembl_doc_id, schembl_chem_id, field, frequency) values (%s, %s, %s, %s)')\n\n\n chunk = []\n\n # Process input records, in chunks\n for i, row in enumerate(tsvin):\n\n if (i == 0) and row[0] == 'SCPN':\n if row != self.CHEM_HEADER_ROW:\n raise RuntimeError(\"Malformed header detected in chemical data file\")\n continue\n\n if (i % chunksize == 0 and i > 0):\n logger.debug( \"Processing chem-mapping data to index {}\".format(i) )\n self._process_chem_rows(sql_alc_conn, update_mappings, chem_ins, chem_struc_ins, chem_map_del, chem_map_ins, chunk)\n del chunk[:]\n\n chunk.append(row)\n\n logger.debug( \"Processing chem-mapping data to index {} (final)\".format(i) )\n self._process_chem_rows(sql_alc_conn, update_mappings, chem_ins, chem_struc_ins, chem_map_del, chem_map_ins, chunk)\n\n # Clean up resources\n chem_ins.close()\n chem_struc_ins.close()\n chem_map_del.close()\n chem_map_ins.close()\n\n sql_alc_conn.close()\n input_file.close()\n\n logger.info(\"Chemical import completed\" 
)",
"def importItem(file_path):\n\n #Ouverture du fichier\n rb = open_workbook(file_path)\n r_sheet = rb.sheet_by_index(0)\n\n for row_index in range (1, r_sheet.nrows):\n #Hydratation or get Supplier Model\n item_supplier= r_sheet.cell(row_index, 4).value\n item_supplier, created = Supplier.objects.get_or_create(name=item_supplier)\n\n #Hydratation or get Category Model\n current_category = r_sheet.cell(row_index, 0).value\n item_category, created = Category.objects.get_or_create(name=current_category)\n\n #Hydratation Item\n item_name = r_sheet.cell(row_index, 1).value\n item_ref = current_supplier= r_sheet.cell(row_index, 3).value\n item_quantity = r_sheet.cell(row_index, 2).value\n item, created = Item.objects.get_or_create(ref=item_ref, name=item_name, category=item_category, supplier=item_supplier, quantity=item_quantity)",
"def importDatabase(self):\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Telefoon, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\", (naamInvoer.get(), achternaamInvoer.get(), telefoonnummerInvoer.get(), FietsNr, pincodeInvoer.get()))\n\n db_conn.commit()",
"def add_filters(fnames):\n with Database(writable=True) as base:\n for fname in fnames:\n with open(fname, 'r') as f_fname:\n filter_name = f_fname.readline().strip('# \\n\\t')\n filter_type = f_fname.readline().strip('# \\n\\t')\n filter_description = f_fname.readline().strip('# \\n\\t')\n filter_table = np.genfromtxt(fname)\n # The table is transposed to have table[0] containing the\n # wavelength and table[1] containing the transmission.\n filter_table = filter_table.transpose()\n # We convert the wavelength from Å to nm.\n filter_table[0] *= 0.1\n\n print(\"Importing {}... ({} points)\".format(filter_name,\n filter_table.shape[1]))\n\n new_filter = Filter(filter_name, filter_description, filter_type,\n filter_table)\n\n # We normalise the filter and compute the effective wavelength.\n # If the filter is a pseudo-filter used to compute line fluxes, it\n # should not be normalised.\n if not filter_name.startswith('PSEUDO'):\n new_filter.normalise()\n else:\n new_filter.effective_wavelength = np.mean(\n filter_table[0][filter_table[1] > 0]\n )\n\n base.add_filter(new_filter)",
"def load_products():\n\n print \"Loading Products\"\n\n for i, row in enumerate(open(\"data/mock_product_data.csv\")):\n row = row.rstrip()\n title, price, inventory = row.split(\",\")\n\n product = Product(title=title,\n price=price,\n available_inventory=inventory)\n\n db.session.add(product)\n\n db.session.commit()",
"def import_prices(self):\n temp = dict(self.currencies_and_regions)\n for index, row in self.df.iterrows():\n self.set_mini_bundle_name(row[\"Journal Name \"])\n self.set_issns(row[\"ISSN\"])\n self.set_currency(row[\"Currency\"])\n if not self.currency:\n continue\n cur = self.get_raw_currency(row[\"Currency\"])\n region = temp[cur]\n self.set_region(region)\n self.set_country(region)\n self.set_price(row[\"2021 rate\"])\n self.add_prices()\n\n # reset for next loop\n self.issns = []\n db.session.commit()",
"def import_file(some_genbank, collection):\n with open(some_genbank, 'r') as open_file:\n collection = kv.get_collection(collection)\n\n # Each \"record\" in genbank file is read, corresponds to individual contigs\n for record in SeqIO.parse(open_file, 'gb'):\n current_contig = record.name\n try:\n current_species = record.annotations['source']\n except KeyError:\n name = re.search(r'\\w+\\/(.+)\\.\\w+$', some_genbank)\n current_species = name.group(1)\n \n\n collection.insert_one({\n 'species':current_species,\n 'contig':current_contig,\n 'dna_seq':str(record.seq),\n 'type':'contig'\n })\n\n print \"Importing {}\".format(current_contig)\n ssu_gene = get_16S(record)\n if ssu_gene:\n try:\n locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n \n parsed_location = kv.get_gene_location(ssu_gene[0].location)\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n },\n 'locus_tag':locus_tag,\n 'annotation':ssu_gene[0].qualifiers['product'][0],\n 'dna_seq':ssu_gene[1],\n 'type':'16S'\n }\n print \"adding 16S gene!\"\n collection.insert_one(gene_record)\n kv.get_collection('16S').insert_one(gene_record)\n\n for feature in record.features:\n if feature.type == 'CDS':\n parsed_location = kv.get_gene_location(feature.location)\n try:\n locus_tag = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n 'index':None\n },\n 'locus_tag':locus_tag,\n 'annotation':feature.qualifiers['product'][0],\n 'dna_seq':get_dna_seq(feature, record),\n 'aa_seq':feature.qualifiers['translation'][0],\n 'type':'gene'\n }\n collection.insert_one(gene_record)",
"def add_to_cart(db, itemid, quantity):",
"def push_to_cartodb(f):\n print \"attempting to import into cartodb\"\n config = loadConfig()\n cl = CartoDBAPIKey(config[\"API_KEY\"],config[\"user\"])\n fi = FileImport(f,cl,table_name='python_table_test')\n fi.run()\n\n return fi.success",
"def load_inventory(file_name, table):\r\n try:\r\n with open(file_name, 'r') as objFile:\r\n for line in objFile:\r\n data = line.strip().split(',')\r\n cd = CD(int(data[0]),data[1],data[2])\r\n table.append(cd)\r\n print(\"{} successfully loaded!\".format(file_name))\r\n except FileNotFoundError:\r\n print(\"Could not load {}\".format(file_name))\r\n return table",
"def load_categories():\n\n Category.query.delete()\n\n with open(category_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n categories_data = row.split(\",\")\n\n id = int(categories_data[0])\n category = categories_data[1]\n\n category_model = Category(id=id, category=category)\n db.session.add(category_model)\n db.session.commit()",
"def import_prices(self):\n temp = dict(self.currencies_and_regions)\n for index, row in self.df.iterrows():\n self.set_journal_name(row[\"Journal Name \"])\n self.set_issn(row[\"ISSN\"])\n self.set_journal()\n self.set_currency(row[\"Currency\"])\n if not self.currency:\n continue\n cur = self.get_raw_currency(row[\"Currency\"])\n region = temp[cur]\n self.set_region(region)\n self.set_country(region)\n self.process_fte(row[\"Price Group\"])\n self.set_price(row[\"2021 rate\"])\n self.add_price_to_db()\n\n db.session.commit()",
"def import_product_data(directory_name, product_file):\n\n start = time.time()\n\n mongo = MongoDBConnection()\n\n with mongo:\n LOGGER.info(\"Establishing MongoDB connection\")\n database = mongo.connection.storeDB\n\n LOGGER.info(\"Establishing databases\")\n products = database[\"products\"]\n initial_entries = database.products.count_documents({})\n\n #entry counts\n added_entries = 0\n\n with open(os.path.join(directory_name, product_file)) as csv_file:\n\n product_data = csv.reader(csv_file, delimiter=\",\")\n for entry in product_data:\n try:\n product_entry = {\"product_id\":entry[0],\n \"description\":entry[1],\n \"product_type\":entry[2],\n \"quantity_available\":entry[3]}\n products.insert_one(product_entry)\n added_entries += 1\n LOGGER.info(f\"Added {entry[0]} to product database\")\n except peewee.IntegrityError:\n LOGGER.info(f\"Error adding {entry[0]} to product database\")\n\n final_entries = database.products.count_documents({})\n\n return((initial_entries, added_entries, final_entries,\n (time.time() - start)))",
"def get_counties():\n\n for i, row in enumerate(open('data/counties_data.csv')):\n data = row.rstrip().split(\",\")\n county_name, latitude, longitude, county_name_lower = data\n\n county = County(county_name=county_name, latitude=latitude, longitude=longitude, county_name_lower=county_name_lower)\n\n db.session.add(county)\n\n if i % 100 == 0:\n print(i)\n\n db.session.commit()",
"def _pre_featurize(self, systems: Iterable[ProteinLigandComplex]) -> None:\n self._create_klifs_structure_db(retrieve_pocket_resids=True)\n self._create_klifs_kinase_db()\n self._create_ligand_smiles_dict()\n if self.shape_overlay:\n self._dowload_klifs_ligands()\n return",
"def insert_to_database(self, db):\n \n self.remove_bad_characters()\n print(\"Inserting \"+self.categorie_name+\" to database.\")\n db.query(\"INSERT INTO categorie (categorie_name) VALUES (:categorie_name)\", \\\n categorie_name=self.categorie_name)",
"def load_database(self, fsp='Species'):\n self.df_species = pd.read_csv(fsp + '.csv', header=0,\n index_col=0)",
"def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)",
"def add_meta_f_to_db(meta_f, p, dbi):\n rism_attributes = sd.Water3DRISM.__dict__.keys()\n extra_attributes = sd.Water3DRISMExtra.__dict__.keys()\n with open(os.path.join(p, meta_f), 'rb') as f:\n txt = f.readlines()\n inchi_line = txt[0]\n if inchi_line.startswith('InChI'):\n print inchi_line\n _, inchi = inchi_line.split(', ')\n inchi = inchi.strip()\n dbmol = dbi.get_molecule(inchi)\n rism = sd.Water3DRISM()\n rism_extra = sd.Water3DRISMExtra()\n else:\n raise ValueError('dbf file must start with InChI, <inchi code>')\n for line in txt[1:]:\n if ',' in line:\n line_l = line.split(', ')\n name = line_l[0].strip()\n values = map(lambda x: x.strip(), line_l[1:])\n if len(line_l) == 2:\n if name in rism_attributes:\n rism.__setattr__(name, values[0])\n elif name in extra_attributes:\n if name == 'UCorrMult':\n rism_extra.__setattr__(name, values[0])\n else:\n with open(os.path.join(p, values[0]), 'rb') as f:\n value = f.read()\n rism_extra.__setattr__(name, value)\n elif len(line_l) == 4:\n rism_therm = sd.ThermodynamicOutput(Property=name)\n if values[0] != '-':\n rism_therm.TotalValue = values[0]\n if values[1] != '-':\n rism_therm.OContrib = values[1]\n if values[2] != '-':\n rism_therm.HContrib = values[2]\n rism.ThermOut.append(rism_therm)\n else:\n print 'Unknown attribute: {}'.format(name)\n rism.Extra = rism_extra\n dbmol.RISMCalcs.append(rism)\n dbi.add_molecule(dbmol)\n print 'Added molecule {}'.format(dbmol)",
"def import_fusion_archive(filename, name=\"import\"):\n import_options = app().importManager.createFusionArchiveImportOptions(filename)\n\n document = app().importManager.importToNewDocument(import_options)\n imported_root = document.products[0].rootComponent\n\n bodies = []\n\n for body in imported_root.bRepBodies:\n bodies.append(brep().copy(body))\n for occurrence in imported_root.allOccurrences:\n for body in occurrence.bRepBodies:\n bodies.append(brep().copy(body))\n\n document.close(saveChanges=False)\n\n return BRepComponent(*bodies, name=name)",
"def creatingItemSets(self, iFileName):\n # import pandas as pd\n # global Database\n self.Database = []\n lineNumber = 0\n # data = []\n if isinstance(iFileName, list):\n self.Database = iFileName\n if isinstance(iFileName, pd.DataFrame):\n if iFileName.empty:\n print(\"its empty..\")\n quit()\n i = iFileName.columns.values.tolist()\n if 'Transactions' in i:\n self.Database = iFileName['Transactions'].tolist()\n if 'Patterns' in i:\n self.Database = iFileName['Patterns'].tolist()\n\n if '.CSV' in iFileName:\n file1 = pd.read_csv(iFileName)\n columns = list(file1.head(0))\n if \"Patterns\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Patterns']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n if \"Transactions\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Transactions']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n else:\n try:\n with open(iFileName, 'r', encoding='utf-8') as f:\n for line in f:\n # line.strip()\n if lineNumber == 0:\n lineNumber += 1\n delimiter = self.findDelimiter([*line])\n # li=[lineNumber]\n li = line.split(delimiter)\n li1 = [i.rstrip() for i in li]\n self.Database.append([i.rstrip() for i in li1])\n # else:\n # self.Database.append(li)\n # data.append([lineNumber,li1])\n else:\n lineNumber += 1\n li = line.split(delimiter)\n # if delimiter==',':\n li1 = [i.rstrip() for i in li]\n self.Database.append(li1)\n except IOError:\n print(\"File Not Found\")\n quit()\n\n # else:\n # self.Database=iFileName['Transactions'].tolist()",
"def importFile(self):\n\n ## Backing up old CSV and JSON files before beginning import operations\n if os.path.isfile(\"text_files/customers.csv\") and os.path.isfile(\"text_files/customers.json\"):\n print(\"\\nCreating a backup of the existing customer .csv and .json files before overwriting\")\n shutil.copy2(\"text_files/customers.csv\", \"text_files/customers.csv.backup\" + str(time.time()))\n shutil.copy2(\"text_files/customers.json\", \"text_files/customers.json.backup\" + str(time.time()))\n\n ## Importing the text file for cleaning then converting to CSV\n input_file = open(\"text_files/customer_export.txt\", \"r\")\n output_file = open(\"text_files/customers.csv\", \"w\")\n\n ## A loop to clean and write the customer_export txt file to a CSV\n for line in input_file:\n clean_text = \"\"\n check_line = line.replace(\"#\", \"\").replace(\",,\",\"\").split(\"|\")\n for line in check_line:\n if line != check_line[10]:\n clean_text += line + \",\"\n elif line == check_line[10]:\n clean_text += line + \"\\n\"\n output_file.write(clean_text)\n\n ## Closing TXT file and CSV file after formatting\n input_file.close()\n output_file.close()\n\n ## Opening the cleaned CSV file for conversion to Json\n with open('text_files/customers.csv') as clean_csv:\n ## Converting CSV file to Json\n converted = csv.DictReader(clean_csv)\n rows = list(converted)\n\n ## Writing converted CSV to Json file\n with open('text_files/customers.json', 'w') as convert:\n json.dump(rows, convert)\n\n ## Deleting all data currently in database before importing new file\n db_connection.executeQuery(\"DELETE FROM CRM;DBCC CHECKIDENT ('CRM', RESEED, 0) DELETE FROM Mailings; DBCC CHECKIDENT ('Mailings', RESEED, 0) COMMIT\") \n\n ## Loading the newly created Json file\n with open(\"text_files/customers.json\") as customers_json:\n customers = json.load(customers_json)\n\n ## A loop to add the contents of the Json file to the database \n print(\"Writing imported file to database please wait...\")\n for key in customers:\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"address\"] + \"', '\" + key[\"city\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"county\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"state\"] + \"', '\" + str(key[\"zip\"]) + \"', '\" + key[\"phone1\"] + \"', '\" + key[\"phone2\"] + \"' , '\" + key[\"email\"] + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \" \" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"','\" + key[\"address\"] + \" \" + key[\"city\"] + \" \" + key[\"county\"] + \" \" + key[\"state\"] + \" \" + str(key[\"zip\"]) + \"'); COMMIT\") \n\n print(\"\\nFinished writing to file. Returning to main menu...\")",
"def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)",
"def add_imported(products):\n \n for product in products:\n add_product(product[\"product_name\"], product[\"product_quantity\"], product[\"product_price\"], product[\"date_updated\"])",
"def migrate_to_db(self, source, items_per_cat):\n data = self.fetch(source, items_per_cat)\n data = self.__attach_source(data, source)\n logger.debug('Migrating %d items to DB', len(data))\n self.repository.write(data)",
"def full_load_db_from_file(batch_size=10000):\n\n q_set = QuestionSet(load=True)\n with open('.config/config.json', 'r') as f:\n config = json.load(f)\n config = config['pg']\n\n conn = psycopg2.connect(\n host=config['host'],\n database=config['db'],\n user=config['user'],\n password=config['password'],\n )\n\n i, values = 0, []\n for q in q_set.questions_ordered:\n values.append((\n q.id,\n q.question,\n q.options,\n q.answer,\n q.category_id,\n ))\n i += 1\n\n cur = conn.cursor()\n cur.execute('TRUNCATE TABLE questions')\n query = \"\"\"\n INSERT INTO questions (id, question, options, answer, category_id)\n VALUES {}\n \"\"\"\n\n j = 0\n log.info(\"Writing {} questions to DB...\".format(i))\n for chunk in chunks(values, batch_size):\n log.info('Batch {}...'.format(j + 1))\n j += 1\n\n args = ','.join(cur.mogrify(\"(%s, %s, %s, %s, %s)\", v).decode(\"utf-8\") for v in chunk)\n cur.execute(query.format(args))\n conn.commit()\n\n log.info(\"Data transfer complete.\")\n cur.close()"
] | [
"0.54053676",
"0.5345252",
"0.5009659",
"0.49099687",
"0.47605696",
"0.4740605",
"0.4696015",
"0.46920276",
"0.46907184",
"0.46507764",
"0.46204922",
"0.46030006",
"0.45985758",
"0.45907193",
"0.45657995",
"0.4537645",
"0.45328835",
"0.45299235",
"0.45244938",
"0.45229474",
"0.4498774",
"0.44955948",
"0.44879308",
"0.44853455",
"0.44819665",
"0.44797873",
"0.44786412",
"0.44749254",
"0.446228",
"0.44553274"
] | 0.7534431 | 0 |
Return a str (one line for each function). | def text_for_funcs_in_script(filename, prefix):
funcs = funcs_in_script(filename)
###################################################
# FIND LENGTH OF LONGEST FUNCTION NAME #
###################################################
maxlen = 0
for func in funcs:
name, header = func
length = len(name)
if length > maxlen:
maxlen = length
###################################################
# CREATE ONE LINE FOR EACH FUNCTION #
###################################################
text = ''
for func in funcs:
name, header = func
namep = name + '()'
line = prefix + namep.ljust(maxlen + 3) + '> ' + header + '\n'
text += line
return text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self) -> str:\n return f\"<Function[{self.name}](line:{self.line})>\"",
"def __str__(self):\n header = [\n ' ObjectiveFunction:']\n header += [('Function: {}').format(self.func.__name__)]\n header += [('Objective: {}').format(self.objective)]\n return ('\\n').join(header) + '\\n'",
"def fortran_functions(self) -> str:\n return ''",
"def __repr__(self):\n result = [\n self.__class__.__name__,\n '(func = ',\n repr(self.func),\n ', timeout = ',\n repr(self.timeout),\n ]\n \n cached = self.cached\n if (cached is not ...):\n result.append(' cached = ')\n result.append(repr(cached))\n \n result.append(')')\n \n return ''.join(result)",
"def code_str(self, fn_name):\n code = CodeWriter()\n code.wl(\"def \" + fn_name + \"(y):\")\n code.inc_indent()\n if self.m == 1:\n code.wl(\"x = \" + str(self._a) + \" * y + \" + str(self._b))\n else:\n code.wl(\"a = np.\" + self._a.__repr__())\n code.wl(\"b = np.\" + self._b.__repr__())\n code.wl(\"x = np.dot(a, y) + b\")\n poly_eval_code = self._unit_simplex_polynomial.code_str(\"temp\")\n poly_eval_code = poly_eval_code.split('\\n')[1:]\n poly_eval_code = \"\\n\".join(poly_eval_code)\n code.verbatim(poly_eval_code)\n code.dec_indent()\n return code.code",
"def toString():",
"def __str__(self) -> str:\n # The default str() for Function includes the arity, which is redundant\n # here. Just use the symbol's name.\n root_str = self.root.name\n children_str = ', '.join(str(child) for child in self.children)\n return f'{root_str}({children_str})'",
"def __str__(self):\n # doctest above is creating issues with \\n character, so I have tested\n # it by printing it to the screen and inspecting if it prints properly\n\n s = \"\"\n for row in self._marker:\n for x in row:\n s += x + \" \"\n s += \"\\n\"\n return s",
"def funcstring(funcname):\n s = str(funcname)[10:] #chop off '<function '\n spi = s.index(' ')\n return s[:spi]",
"def get_string(dump_fn, routines, prefix=\"file\"):\n output = StringIO()\n dump_fn(routines, output, prefix, header=False, empty=False)\n source = output.getvalue()\n output.close()\n return source",
"def _(self, node: FunctionDef):\n body_nodes = []\n for n in node.body:\n curr_piece = self.visit(n)\n if len(curr_piece) > 0:\n body_nodes.append(curr_piece)\n\n func_body = \" \".join(body_nodes)\n\n return f\"( {node.name} {func_body} )\"",
"def fmt_rust_function(func: Callable) -> str:\n return f\"{func.__module__}:{func.__code__.co_firstlineno}:{func.__name__}\"",
"def __str__(self):\n slist = self.buildstrings()\n local_s = ''\n for slistsub in range(0, len(slist)):\n local_s += slist[slistsub]\n if slistsub != len(slist)-1:\n local_s += '\\n'\n return local_s",
"def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str",
"def text(self):\n return os.linesep.join(str(s) for s in self.statements)",
"def __str__(self):\n\n # This appears at the end of the fed method line\n strme = \"{} {} {} {}\"\\\n .format(self.n_itr, self.i_beg, self.i_end, self.omega)\n\n return strme",
"def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st",
"def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string",
"def _build_code_from_func(self, func: Callable) -> str:\n with open(assets.paths.PARTIAL_MAIN_FILEPATH, 'r') as f:\n main_string = f.read()\n lines = inspect.getsourcelines(func)\n\n tabs_diff = lines[0][0].count(' ') - 1\n for line_index in range(len(lines[0])):\n line_tabs = lines[0][line_index].count(' ') - tabs_diff\n lines[0][line_index] = (' ' * line_tabs) + lines[0][line_index].strip() + '\\n'\n\n method_func_string = \"\".join(lines[0])\n\n code = '{}\\n{}\\n @staticmethod\\n{}'.format('', main_string,\n method_func_string)\n return code",
"def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(ExpandedEnsemble.key, self.eta0, self.c_upd, self.n_upd)\n if self.smooth:\n strme = \"{!s} {!s}\".format(strme, self.smooth)\n\n return strme",
"def log_function_code(func_to_log: Callable) -> str:\n if not callable(func_to_log):\n TypeError(f\"Parameter 'func_to_log' is not function. Actual value: {func_to_log}.\")\n function_definition = inspect.getsource(func_to_log)\n if function_definition.startswith(\"return \"):\n function_definition = function_definition[7:]\n return repr(function_definition.strip())",
"def command_string(func, targets, sources, kwds):\n args= [repr(targets[0])] if len(targets) == 1 \\\n else [] if not targets else [repr(targets)]\n if sources:\n args.append(repr(sources[0]) if len(sources) == 1\n else repr(sources))\n if kwds:\n args.append(', '.join(['{}={}'.format(k, repr(v))\n for k, v in kwds.items()]))\n return '{}({})'.format(func.__name__, ', '.join(args))",
"def __str__(self):\n\n OptiObjFunc_str = \"\"\n if self.parent is None:\n OptiObjFunc_str += \"parent = None \" + linesep\n else:\n OptiObjFunc_str += (\n \"parent = \" + str(type(self.parent)) + \" object\" + linesep\n )\n OptiObjFunc_str += 'description = \"' + str(self.description) + '\"' + linesep\n if self._func[1] is None:\n OptiObjFunc_str += \"func = \" + str(self._func[1])\n else:\n OptiObjFunc_str += (\n \"func = \" + linesep + str(self._func[1]) + linesep + linesep\n )\n return OptiObjFunc_str",
"def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)",
"def output(self):\n pdb.set_trace()\n return \"\".join(self.pieces)",
"def task_6_insert_function_result_into_string(func: Callable):\n return f'start {func()} finish'",
"def __str__(self) -> str:\n if self.decorator is None:\n decorator_str = \"\"\n elif self.decorator:\n decorator_str = \"+\"\n else:\n decorator_str = \"-\"\n return \" \".join([\"The nilpotent orbit corresponding\",\n f\"to partition {self.my_diagram}{decorator_str}\",\n f\"in type {self.my_type.letter()} {self.lie_rank}\"])",
"def __str__(self):\n outbuffer = []\n outbuffer.append(\"%d keys in dataset\" % len(self.__quantile))\n outbuffer.append(self.head())\n outbuffer.append(\"...\")\n outbuffer.append(self.tail())\n return \"\\n\".join(outbuffer)",
"def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"",
"def __str__(self):\n result = ''\n result += '+---+\\n'\n for i in range(3):\n result += '|' + self[i*3] + self[i*3+1] + self[i*3+2] + '|\\n'\n result += '+---+'\n return result"
] | [
"0.67271453",
"0.6665395",
"0.6479111",
"0.6360932",
"0.6307341",
"0.6295157",
"0.6287792",
"0.62198514",
"0.62004966",
"0.6192446",
"0.61677325",
"0.6148869",
"0.6146638",
"0.61425155",
"0.6135748",
"0.61207575",
"0.6108479",
"0.6081794",
"0.6076851",
"0.6067959",
"0.6054603",
"0.60511917",
"0.60496575",
"0.6022767",
"0.59911203",
"0.59676844",
"0.5964578",
"0.59356594",
"0.5927497",
"0.5922166"
] | 0.7050024 | 0 |
Take the addressLocality field in each object, tokenize it by space and comma, lower case it and convert to set of words. use each token in that set as a 'key' for the cluster. We'll start by analyzing those. | def cluster_by_addressLocality(input_file, output_file=None):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assignment(self, addresses, centroids, k):\n newClusters = {}\n print centroids\n for (lat, long) in addresses:\n minDistance = float('Inf')\n minIndex = 0\n for i in range(k):\n if pow(self.euclideanDistance((lat, long), centroids[i]),2) < minDistance:\n minDistance = pow(self.euclideanDistance((lat, long), centroids[i]),2)\n minIndex = i\n if minIndex in newClusters:\n newClusters[minIndex].append((lat, long))\n else:\n newClusters[minIndex] = [(lat, long)]\n return newClusters",
"def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))",
"def common_words(self):\n order_centroids = self.model.cluster_centers_.argsort()[:, ::-1]\n clusters = self.model.labels_.tolist()\n vocab = self.vectorizer.vocabulary_\n return [ [vocab.keys()[vocab.values().index(i)] for i in\n order_centroids[cluster, :10]] for cluster in sorted(set(clusters))]",
"def _extract_terms(self, obj):\r\n terms = set()\r\n if 'paths' in obj:\r\n for path in obj['paths']:\r\n segs = re.split('[/{}]', path)\r\n for seg in segs:\r\n terms.add(seg.lower())\r\n self.terms = terms",
"def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)",
"def process_train_tags(train_data, locality):\n def get_locations_by_tag():\n locations_by_tag = {}\n for train_img in train_data:\n loc = Location(float(train_img['latitude']), float(train_img['longitude']))\n img_tags = train_img['tags']\n\n for tag in img_tags:\n if tag not in locations_by_tag:\n locations_by_tag[tag] = []\n locations_by_tag[tag].append(loc)\n return locations_by_tag\n\n def get_mean_loc_by_tag(locs_by_tag):\n global get_mean_loc\n def get_mean_loc(tag):\n locations = locs_by_tag[tag]\n lst_lat = []\n lst_lon = []\n for loc in locations:\n lst_lat.append(loc.lat)\n lst_lon.append(loc.lon)\n lst_lat, lst_lon = np.array(lst_lat), np.array(lst_lon)\n\n avg_lat = np.mean(lst_lat)\n avg_lon = np.mean(lst_lon)\n avg_loc = Location(avg_lat, avg_lon)\n\n list_distance = []\n for lat, lon in zip(lst_lat, lst_lon):\n dist = Location.dist(avg_loc, Location(lat, lon))\n list_distance.append(dist)\n var = np.var(list_distance)\n return Location(avg_lat, avg_lon, var)\n\n mean_loc_by_tag = {}\n with mp.Pool(mp.cpu_count()) as p:\n locs = p.map(get_mean_loc, locs_by_tag.keys())\n i = 0\n for tag in locs_by_tag:\n mean_loc_by_tag[tag] = locs[i]\n i += 1\n return mean_loc_by_tag\n\n for img in train_data:\n remove_low_locality_tags(locality, img['tags'])\n\n locations_by_tag = get_locations_by_tag()\n\n return get_mean_loc_by_tag(locations_by_tag)",
"def test_map(data):\n k, v = data\n for s in split_into_sentences(v):\n for w in split_into_words(s.lower()):\n yield (w, \"\")",
"def process(data):\n # words to scrub from data\n strip_words = [\n 'avenue',\n 'ave',\n 'street',\n 'boulevard',\n 'blvd',\n 'st',\n 'road',\n 'rd',\n 'court',\n 'ct',\n 'guest',\n 'guests',\n 'family',\n 'spouse',\n 'spouses'\n ]\n # quick and dirty translator for scrubbing punctuation from data\n translator = str.maketrans({key: None for key in string.punctuation})\n for i in range(len(data)):\n indx, name, addr = data[i] # ,zipc,twn,apt\n\n # scrub the data and normalize to lowercase\n name = name.translate(translator)\n addr = addr.translate(translator)\n name = name.lower()\n addr = addr.lower()\n name = replace_all(name, strip_words)\n addr = replace_all(addr, strip_words)\n\n # identify similar entries from the remainder of the data\n matches = []\n for j in range(i + 1, len(data)):\n\n # scrub the data\n n_indx, n_name, n_addr = data[j] # ,n_zipc,n_twn,n_apt\n n_name = n_name.translate(translator)\n n_addr = n_addr.translate(translator)\n n_name = n_name.lower()\n n_addr = n_addr.lower()\n n_name = replace_all(n_name, strip_words)\n n_addr = replace_all(n_addr, strip_words)\n # print(addr, n_addr)\n\n # check for similarity\n # TODO: should a report be made if only one of these is similar?\n if sim(name, n_name) and sim(addr, n_addr):\n matches.append(data[j])\n\n # report the matches found\n if len(matches) > 0:\n tmp = \"%d: %s, %s\"\n s1 = tmp % tuple(data[i])\n s2 = \"*\" * 15\n print(s1)\n print(s2)\n for m in matches:\n print(tmp % tuple(m))\n print(\"\\n\")",
"def cluster_text(list_of_text):\n print(\"Clustering text info saved the clustering.txt\")\n vectorizer = TfidfVectorizer(stop_words=\"english\")\n transform = vectorizer.fit_transform(list_of_text)\n\n true_k = 70\n\n model = MiniBatchKMeans(n_clusters=true_k, init=\"k-means++\", max_iter=100, n_init=1)\n model.fit(transform)\n clusters = {}\n for i in model.labels_:\n if not i in clusters:\n clusters[i] = 1\n else:\n clusters[i] += 1\n\n order_centroids = model.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n with open(\"clustering.txt\", \"w+\") as f:\n f.write(\"Top terms per cluster:\\n\")\n for i in range(true_k):\n with open(\"clustering.txt\", \"a\") as f:\n f.write(f\"Cluster {i}\\n\")\n f.write(f\"Number of tweets in this cluster: {clusters[i]}\\n\")\n term_list = []\n for ind in order_centroids[i, :10]:\n with open(\"clustering.txt\", \"a\") as f:\n f.write(terms[ind] + \"\\n\")\n term_list.append(terms[ind] + \"\\n\")\n return model.labels_",
"def zephir_clusters_lookup(self, ocns_list):\n zephir_cluster = {\n \"inquiry_ocns_zephir\": ocns_list,\n \"cid_ocn_list\": [],\n \"cid_ocn_clusters\": {},\n \"num_of_matched_zephir_clusters\": 0,\n \"min_cid\": None,\n }\n\n cid_ocn_list_by_ocns = self.find_zephir_clusters_by_ocns(ocns_list)\n if not cid_ocn_list_by_ocns:\n return zephir_cluster\n\n # find all OCNs in each cluster\n cids_list = [cid_ocn.get(\"cid\") for cid_ocn in cid_ocn_list_by_ocns]\n unique_cids_list = list(set(cids_list))\n cid_ocn_list = self.find_zephir_clusters_by_cids(unique_cids_list)\n if not cid_ocn_list:\n return zephir_cluster\n\n # convert to a dict with key=cid, value=list of ocns\n cid_ocn_clusters = formatting_cid_id_clusters(cid_ocn_list, \"ocn\")\n\n zephir_cluster = {\n \"inquiry_ocns_zephir\": ocns_list,\n \"cid_ocn_list\": cid_ocn_list,\n \"cid_ocn_clusters\": cid_ocn_clusters,\n \"num_of_matched_zephir_clusters\": len(cid_ocn_clusters),\n \"min_cid\": min([cid_ocn.get(\"cid\") for cid_ocn in cid_ocn_list])\n }\n return zephir_cluster",
"def calculate_binomial_locations(name: str, citelist: list) -> set:\n locs = set()\n for c in citelist:\n clean = clean_name(c.name)\n if clean.lower() == name.lower():\n if c.applied_cites is not None:\n for a in c.applied_cites:\n p = a.application\n if (p != \".\") and (p[0] != \"[\") and (p != \"?\"):\n locs |= {strip_location_subtext(p)}\n return locs",
"def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]",
"def name_places(self):\n self.city_names = {}\n self.region_names = {}\n for city in self.cities:\n self.city_names[city] = self.lang.name(\"city\")\n for region in np.unique(self.territories):\n self.region_names[region] = self.lang.name(\"region\")",
"def listToAddr(location):\n\n start_time = time.time()\n wk = [key for key in location.keys() if key in ('street', 'house_num', 'suburb', 'city', 'province', 'country', 'pos_code')]\n address = re.sub(',', '', ', '.join(value for value in dict(zip(wk, [location[k] for k in wk])).values() if value), 1)\n print('--- Tiempo de ejecucion listToAddr: {} segundos ---'.format((time.time() - start_time)))\n return address",
"def filter_cluster_partition(cluster_user_dict, net_local_list):\n cluster_dict = defaultdict(tuple)\n\n for i, cluster_members in cluster_user_dict.items():\n cluster_dict[i] = (net_local_list[cluster_members], \n np.ones((len(cluster_members), len(cluster_members))),\n cluster_members)\n return cluster_dict",
"def preprocess(tokens):\n result = []\n for token in tokens:\n result.append(token.lower())\n return result",
"def locFromText(set_Country, textList, filterList):\n loc = []\n print('Start extracting locations from texts')\n for t in textList:\n # print(row)\n text = t[1]\n if len(text) > 0:\n text = re.sub(r'[^\\w]', ' ', text) # remove symbol\n\n places = geograpy.get_place_context(text=text)\n addStr = places.address_strings\n for add in addStr:\n country = add.split(',')[2] # get country name from extracted address_strings\n # print(country)\n if set_Country in country and not any(e in add for e in filterList):\n # print('City:', add)\n loc.append((t[0], add))\n return loc",
"def normalize(address):\n replacement = re.sub('\\W+', SEPARATOR, address.lower())\n\n processed = []\n for p in replacement.split(SEPARATOR):\n if not p:\n continue\n\n if p in ABBRS:\n processed.append(ABBRS[p])\n else:\n processed.append(p)\n\n processed.sort()\n\n normalized = SEPARATOR.join(processed)\n return normalized",
"def create_feature_space(sentences):\n splits = [s.split() for s in sentences]\n types = set(reduce(lambda x, y: x + y, splits))\n lookup = dict()\n for i, word in enumerate(types):\n lookup[word] = i\n return lookup",
"def preprocess_query(query):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n # for i in range(len(query)):\r\n query = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(query)) if tag in tags]\r\n query = [wordnet_lemmatizer.lemmatize(w, t) for (w, t) in query ]\r\n return query",
"def continents_and_cities(self):\r\n list_all = col.defaultdict(list)\r\n for code, node in self.vertices.items():\r\n list_all[node.continent].append(node.name)\r\n return list_all",
"def normalizer(self, place, includeZeroPopulation=False, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'locationNormalizer')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json",
"def classify(self, mutation) -> Set[Category]:\n def normalise(string):\n \"\"\"Remove double spaces, make lower case. Just remove some weirdness\"\"\"\n return re.sub(' +', ' ', string).lower()\n return {cat for string, cat in self.mapping.items()\n if normalise(string) in normalise(mutation.description)}",
"def find_abecedarian_words():\n pass",
"def __autorename_clusters(self, cluster_list, dictionary, n=1):\n renamed_cluster_list = {}\n for cluster, docs in cluster_list.items():\n list_of_vectors = []\n for doc_title in docs:\n for doc in self.__corpus:\n if doc.title == doc_title:\n list_of_vectors.append(doc.vector)\n \n def multiply_vector(vector):\n res = 1\n for dim in vector:\n res *= dim\n return res\n \n # Calculate intersection between vectors.\n intersect = [multiply_vector(vector) for vector in zip(*list_of_vectors)]\n \n # Find common words between all documents.\n common_words = {}\n for i in range(0, len(intersect)):\n if intersect[i] != 0:\n common_words[intersect[i]] = dictionary[i]\n \n # Sort common words.\n if (len(common_words) > 0):\n sorted_commond_words = sorted(common_words.items(), reverse=True)[:n]\n renamed_cluster_list[' '.join([str(elem[1]) for elem in sorted_commond_words])] = cluster_list[cluster]\n else:\n renamed_cluster_list[cluster] = cluster_list[cluster]\n return renamed_cluster_list",
"def cluster_names(center_names):\n names = np.asarray(center_names)\n\n print(\"Clustering names.\")\n lev_similarity = -1*np.array([[distance.levenshtein(w1, w2) for w1 in names] for w2 in names])\n affprop = sklearn.cluster.AffinityPropagation(affinity=\"precomputed\", damping=0.5)\n affprop.fit(lev_similarity)\n\n print(\"Writing clusters to file.\")\n with open(output_file, \"w\") as f:\n for cluster_id in np.unique(affprop.labels_):\n cluster = np.unique(names[np.nonzero(affprop.labels_ == cluster_id)])\n cluster_dict = {\"cluster\": list(cluster)}\n f.write(json.dumps(cluster_dict, indent=2) + \"\\n\")",
"def intents_clustering(self):\n self.phrs2intents = {}\n number_of_other = 10000;\n for i in range(len(self.data)):\n for ut in self.data[i]['utterances']:\n if ut['speaker'] == 'USER':\n if 'segments' in ut.keys():\n for seg in ut['segments']:\n if 'annotations' in seg.keys():\n for anno in seg['annotations']:\n name = anno['name']\n if ut['text'] not in self.phrs2intents.keys():\n self.phrs2intents[ ut['text'] ] = [name]\n elif name not in self.phrs2intents[ ut['text'] ]:\n self.phrs2intents[ ut['text'] ].append(name)\n else:\n if number_of_other > 0:\n self.phrs2intents[ ut['text'] ] = ['other']\n number_of_other -= 1\n self.X = np.array(list(self.phrs2intents.keys()))",
"def _words2trie(self, words):\n ret = Trie()\n for w_i in words:\n term = USCORE_RE.sub(' ', w_i)\n terms = SPACE_RE.split(self._preprocess(term))\n ret.add(terms, [None] * len(terms), 1.)\n return ret",
"def seperate_City_State_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n v = list(dictionary.values())\n values = []\n res = []\n for i in range(len(keys)):\n state = tmp[i][1].strip()\n city = tmp[i][0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append((state, city))\n values.append(v[i])\n return res, list(values)",
"def preprocess_corpus(train_sents):\n global lookupLexiconDict\n lookupLexiconDict = {}\n \n lexiconDir = getcwd()+'\\\\data\\\\lexicon'\n filesList = [hfile for hfile in listdir(lexiconDir) if path.isfile(lexiconDir+'\\\\'+hfile) ]\n \n decision_tags = ['facility','product','musicartist']\n fileMappingDict = \\\n {\n 'architecture.museum':'facility',\n 'automotive.make':'product',\n 'automotive.model':'product',\n 'award.award':'musicartist',\n 'base.events.festival_series':'geo-loc',\n #'bigdict':'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',\n 'book.newspaper':'company',\n 'broadcast.tv_channel':'tvshow',\n 'business.brand':'company',\n 'business.consumer_company':'company',\n 'business.consumer_product':'product',\n 'business.sponsor':'company',\n 'cap.1000':'geo-loc',\n 'cvg.computer_videogame':'product',\n 'cvg.cvg_developer':'company',\n 'cvg.cvg_platform':'product',\n 'education.university':'facility',\n 'english.stop':'O',\n 'firstname.5k':'person',\n 'government.government_agency':'company',\n 'internet.website':'company',\n 'lastname.5000':'person',\n 'location':'geo-loc',\n 'location.country':'geo-loc',\n 'lower.5000':'O',\n 'people.family_name':'person',\n 'people.person':'person',\n 'people.person.lastnames':'person', # <-----------------------------\n 'product':'product',\n 'sports.sports_league':'sportsteam',\n 'sports.sports_team':'sportsteam',\n 'time.holiday':'O',\n 'time.recurring_event':'O',\n 'transportation.road':'geo-loc',\n 'tv.tv_network':'tvshow',\n 'tv.tv_program':'tvshow',\n 'venture_capital.venture_funded_company':'company',\n 'venues':'geo-loc'\n }\n\n for lexFile in filesList:\n if lexFile not in fileMappingDict: continue\n print 'Processing ', lexFile\n \n with open(lexiconDir+'\\\\'+lexFile) as f:\n for line in f:\n line = line.lower().split()\n if len(line) == 1: low=0\n else:low=1\n for i in range(low,len(line)):\n key = tuple(line[:i+1])\n if key not in lookupLexiconDict:\n lookupLexiconDict[key] = [fileMappingDict[lexFile]]\n else:\n lookupLexiconDict[key].append(fileMappingDict[lexFile]) \n\n \n #pass "
] | [
"0.5454766",
"0.5336172",
"0.5156881",
"0.49238616",
"0.49154887",
"0.4913995",
"0.49080405",
"0.4892176",
"0.4884314",
"0.48810974",
"0.48677456",
"0.48650676",
"0.48480317",
"0.48207906",
"0.48084152",
"0.47848007",
"0.47784892",
"0.47779834",
"0.4769221",
"0.4753887",
"0.47448608",
"0.47447237",
"0.47392365",
"0.471698",
"0.47121918",
"0.4694017",
"0.46926358",
"0.46712875",
"0.4665982",
"0.46368092"
] | 0.6299373 | 0 |
Access the fA Function object | def fA(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def fun_a(self):\n pass",
"def f(self):\n return self._f",
"def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...",
"def _function_class(self):\n return FriCASExpectFunction",
"def get_function(self):\n return SSAFunction(self.get_graph())",
"def get_function(self):\n return self.element.get_basis_functions()[self.n]",
"def getFunctionAt(self, entryPoint: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Function:\n ...",
"def function(self):\n raise NotImplementedError",
"def get_function(self):\n return subs(self.f.get_function(), self.sub_pre, self.sub_post)",
"def _func(self):\n return self._get_flint_func(self.domain)",
"def __call__(fun_name):",
"def _function_element_class(self):\n return FriCASFunctionElement",
"def func ( self ) :\n return self.__func",
"def fAT(self):\n pass",
"def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def func ( self ) :\n return self.__func",
"def functions(self):\n return functions(self.startEA, self.endEA)",
"def get_a_func(self, is_training=False, reuse=False):\n return functools.partial(self.a_func,\n num_actions=self._action_size,\n scope='a_func',\n reuse=reuse,\n is_training=is_training)",
"def f():",
"def f():",
"def firstFunction(self):",
"def function(self):\n return self.devicefuncs[self._funcname]",
"def get_real_function( this, fn):\n\t\treturn this._get_native_function(fn)",
"def getFunctionContaining(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Function:\n ...",
"def __call__(self, function: FuncSpeechArg):\n self._add_attr(function)\n return function",
"def getFunction(self, key: long) -> ghidra.program.model.listing.Function:\n ...",
"def get_function(self):\n raise NotImplementedError()",
"def fn(self):\n return self._fn",
"def __init__(self, function):\n self.function = function"
] | [
"0.7123425",
"0.68525356",
"0.64563775",
"0.64336723",
"0.639151",
"0.63692874",
"0.62956244",
"0.61945254",
"0.61939263",
"0.6183318",
"0.6174362",
"0.6169994",
"0.6164318",
"0.61532414",
"0.61523753",
"0.61212516",
"0.61144304",
"0.6113079",
"0.6110896",
"0.6106164",
"0.6106164",
"0.6067367",
"0.6015096",
"0.6003323",
"0.59533143",
"0.59437084",
"0.591298",
"0.59092134",
"0.5906983",
"0.5884609"
] | 0.7363549 | 0 |
Access the fAT Function object | def fAT(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def _function_class(self):\n return FriCASExpectFunction",
"def _func(self):\n return self._get_flint_func(self.domain)",
"def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...",
"def getFunctionAt(self, entryPoint: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Function:\n ...",
"def get_function(self):\n return SSAFunction(self.get_graph())",
"def function(self):\n return self.devicefuncs[self._funcname]",
"def get_function(self):\n return self.element.get_basis_functions()[self.n]",
"def _function_element_class(self):\n return FriCASFunctionElement",
"def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def f(self):\n return self._f",
"def fortran_function(self) -> str:\n return ''.join([i.fortran_function() for i in self.instances])",
"def get_function(self):\n return Gumtree.gumtree.getFunction()",
"def __init__(self, function):\n self.function = function",
"def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")",
"def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")",
"def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")",
"def fn(self):\n return self._fn",
"def __init__(self, function='cogscore/'):\n self.function = function",
"def func ( self ) :\n return self.__func",
"def getFunction(self, key: long) -> ghidra.program.model.listing.Function:\n ...",
"def function(self):\n raise NotImplementedError",
"def __init__(self, function='sourcepfam/'):\n self.function = function",
"def function(self) -> str:\n return pulumi.get(self, \"function\")",
"def func ( self ) :\n return self.__func",
"def get_function(self):\n return subs(self.f.get_function(), self.sub_pre, self.sub_post)",
"def get_real_function( this, fn):\n\t\treturn this._get_native_function(fn)",
"def __call__(self, function: FuncSpeechArg):\n self._add_attr(function)\n return function",
"def __init__(self, function, function_representation):\n self.function = function\n self.function_representation = function_representation",
"def get_function(self):\n raise NotImplementedError()"
] | [
"0.7442082",
"0.6662068",
"0.6623393",
"0.6567047",
"0.65216845",
"0.6444521",
"0.643786",
"0.64078254",
"0.63968444",
"0.6320934",
"0.62078595",
"0.62051314",
"0.6176589",
"0.6172265",
"0.6146051",
"0.6146051",
"0.6146051",
"0.61392206",
"0.61384785",
"0.61301565",
"0.6126505",
"0.6124349",
"0.6110723",
"0.609826",
"0.60927474",
"0.60614",
"0.6059265",
"0.604796",
"0.60394657",
"0.6000604"
] | 0.71219444 | 1 |
Access the fG Function object | def fG(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def get_function(self):\n return Gumtree.gumtree.getFunction()",
"def f(self):\n return self._f",
"def _func(self):\n return self._get_flint_func(self.domain)",
"def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...",
"def get_function(self):\n return SSAFunction(self.get_graph())",
"def gfa(self):\n return self.GFA",
"def get_function(self):\n return self.element.get_basis_functions()[self.n]",
"def f0(self):\n return self._f0",
"def function(self):\n return self._dim, self._function, self._parameters.copy()",
"def get_function(self):\n return subs(self.f.get_function(), self.sub_pre, self.sub_post)",
"def get_gof(self):\n gof = self.calculate_gof(self.data_sample, self.reference_sample)\n self.gof = gof\n return gof",
"def get_gof(self):\n gof = self.calculate_gof(self.data_sample, self.reference_sample)\n self.gof = gof\n return gof",
"def fGT(self):\n pass",
"def g():",
"def getFunction(self, key: long) -> ghidra.program.model.listing.Function:\n ...",
"def func ( self ) :\n return self.__func",
"def get_function(self):\n raise NotImplementedError()",
"def eval_objfn(self):\n\n fval = self.obfn_f(self.X)\n gval = self.obfn_g(self.X)\n obj = fval + gval\n return (obj, fval, gval)",
"def func ( self ) :\n return self.__func",
"def __init__ (self, f, g):\n self.f = f\n self.g = g\n pass",
"def f(self):\n return self.g() + self.h()",
"def fobj(self):\n return self._fobj",
"def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def function(self):\n raise NotImplementedError",
"def __init__(self, function='cogscore/'):\n self.function = function",
"def fn(self):\n return self._fn",
"def gen_fv(self, g, ng, fv):\n if fv.graph not in self.graphs:\n return self.gen_constant(g, ng, fv)",
"def show_func(self, x):\n\n if (self._flag == 1):\n g = self.modelfun(x, *self._gf)\n elif (self._flag == 2):\n g = self.modelfun1(x, *self._gf)\n elif ((self._flag == 0) & (self._load != '0')):\n pass\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")\n\n return g",
"def fortran_function(self) -> str:\n return ''.join([i.fortran_function() for i in self.instances])"
] | [
"0.76139027",
"0.6928935",
"0.6813307",
"0.6666093",
"0.6652074",
"0.6623601",
"0.6622213",
"0.6612022",
"0.6381208",
"0.6379428",
"0.6367264",
"0.6351534",
"0.6351534",
"0.6345019",
"0.6327853",
"0.6263485",
"0.62374866",
"0.62359047",
"0.62125313",
"0.6190691",
"0.6170642",
"0.6170331",
"0.6158709",
"0.61567646",
"0.61512536",
"0.61168146",
"0.6080245",
"0.607196",
"0.60603076",
"0.6030114"
] | 0.7354346 | 1 |